- Preliminary (working) setup up to and including the verification service, Bunny integration, ...
@@ -166,8 +166,6 @@ class Config(object):
|
||||
# Environment Loaders
|
||||
OPENAI_API_KEY = environ.get('OPENAI_API_KEY')
|
||||
MISTRAL_API_KEY = environ.get('MISTRAL_API_KEY')
|
||||
GROQ_API_KEY = environ.get('GROQ_API_KEY')
|
||||
ANTHROPIC_API_KEY = environ.get('ANTHROPIC_API_KEY')
|
||||
|
||||
# Celery settings
|
||||
CELERY_TASK_SERIALIZER = 'json'
|
||||
@@ -329,9 +327,11 @@ class StagingConfig(Config):
|
||||
# UPLOAD_FOLDER = '/app/tenant_files'
|
||||
|
||||
# Redis Settings
|
||||
REDIS_URL = 'redis'
|
||||
REDIS_PORT = '6379'
|
||||
REDIS_BASE_URI = f'redis://{REDIS_URL}:{REDIS_PORT}'
|
||||
REDIS_URL = environ.get('REDIS_URL')
|
||||
REDIS_PORT = environ.get('REDIS_PORT', '6379')
|
||||
REDIS_USER = environ.get('REDIS_USER')
|
||||
REDIS_PASS = environ.get('REDIS_PASS')
|
||||
REDIS_BASE_URI = f'rediss://{REDIS_USER}:{REDIS_PASS}@{REDIS_URL}:{REDIS_PORT}'
|
||||
|
||||
# Celery settings
|
||||
# eveai_app Redis Settings
|
||||
@@ -358,9 +358,9 @@ class StagingConfig(Config):
|
||||
OBJECT_STORAGE_TENANT_BASE = 'Folder'
|
||||
OBJECT_STORAGE_BUCKET_NAME = 'eveai-staging'
|
||||
# MINIO
|
||||
MINIO_ENDPOINT = 'https://eveai-staging.s3.fr-par.scw.cloud'
|
||||
MINIO_ACCESS_KEY = environ.get('SCALEWAY_EVEAI_STAGING_ACCESS_KEY')
|
||||
MINIO_SECRET_KEY = environ.get('SCALEWAY_EVEAI_STAGING_SECRET_KEY')
|
||||
MINIO_ENDPOINT = environ.get('MINIO_ENDPOINT')
|
||||
MINIO_ACCESS_KEY = environ.get('MINIO_ACCESS_KEY')
|
||||
MINIO_SECRET_KEY = environ.get('MINIO_SECRET_KEY')
|
||||
MINIO_USE_HTTPS = True
|
||||
|
||||
|
||||
|
||||
@@ -14,17 +14,13 @@ x-common-variables: &common-variables
|
||||
FLOWER_USER: 'Felucia'
|
||||
FLOWER_PASSWORD: 'Jungles'
|
||||
OPENAI_API_KEY: 'sk-proj-8R0jWzwjL7PeoPyMhJTZT3BlbkFJLb6HfRB2Hr9cEVFWEhU7'
|
||||
GROQ_API_KEY: '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'
|
||||
MISTRAL_API_KEY: '0f4ZiQ1kIpgIKTHX8d0a8GOD2vAgVqEn'
|
||||
ANTHROPIC_API_KEY: 'sk-ant-api03-c2TmkzbReeGhXBO5JxNH6BJNylRDonc9GmZd0eRbrvyekec2'
|
||||
JWT_SECRET_KEY: 'bsdMkmQ8ObfMD52yAFg4trrvjgjMhuIqg2fjDpD/JqvgY0ccCcmlsEnVFmR79WPiLKEA3i8a5zmejwLZKl4v9Q=='
|
||||
API_ENCRYPTION_KEY: 'xfF5369IsredSrlrYZqkM9ZNrfUASYYS6TCcAR9UKj4='
|
||||
MINIO_ENDPOINT: minio:9000
|
||||
MINIO_ACCESS_KEY: minioadmin
|
||||
MINIO_SECRET_KEY: minioadmin
|
||||
NGINX_SERVER_NAME: 'localhost http://macstudio.ask-eve-ai-local.com/'
|
||||
LANGCHAIN_API_KEY: "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"
|
||||
SERPER_API_KEY: "e4c553856d0e6b5a171ec5e6b69d874285b9badf"
|
||||
CREWAI_STORAGE_DIR: "/app/crewai_storage"
|
||||
PUSH_GATEWAY_HOST: "pushgateway"
|
||||
PUSH_GATEWAY_PORT: "9091"
|
||||
|
||||
@@ -29,8 +29,6 @@ x-common-variables: &common-variables
|
||||
FLOWER_USER: 'Felucia'
|
||||
FLOWER_PASSWORD: 'Jungles'
|
||||
OPENAI_API_KEY: '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'
|
||||
GROQ_API_KEY: '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'
|
||||
ANTHROPIC_API_KEY: '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'
|
||||
MISTRAL_API_KEY: 'PjnUeDRPD7B144wdHlH0CzR7m0z8RHXi'
|
||||
JWT_SECRET_KEY: '0d99e810e686ea567ef305d8e9b06195c4db482952e19276590a726cde60a408'
|
||||
API_ENCRYPTION_KEY: 'Ly5XYWwEKiasfAwEqdEMdwR-k0vhrq6QPYd4whEROB0='
|
||||
@@ -40,8 +38,6 @@ x-common-variables: &common-variables
|
||||
MINIO_ACCESS_KEY: 04JKmQln8PQpyTmMiCPc
|
||||
MINIO_SECRET_KEY: 2PEZAD1nlpAmOyDV0TUTuJTQw1qVuYLF3A7GMs0D
|
||||
NGINX_SERVER_NAME: 'evie.askeveai.com mxz536.stackhero-network.com'
|
||||
LANGCHAIN_API_KEY: "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"
|
||||
SERPER_API_KEY: "e4c553856d0e6b5a171ec5e6b69d874285b9badf"
|
||||
CREWAI_STORAGE_DIR: "/app/crewai_storage"
|
||||
|
||||
networks:
|
||||
|
||||
238
docker/tag_registry_version.sh
Executable file
@@ -0,0 +1,238 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Exit on any error
|
||||
set -e
|
||||
|
||||
# Function to display usage information
|
||||
usage() {
|
||||
echo "Usage: $0 <version> [options]"
|
||||
echo " version : Version to tag (e.g., v1.2.3, v1.2.3-alpha, v2.0.0-beta)"
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " --services <service1,service2,...> : Specific services to tag (default: all EveAI services)"
|
||||
echo " --dry-run : Show what would be done without executing"
|
||||
echo " --force : Overwrite existing version tags"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 v1.2.3-alpha"
|
||||
echo " $0 v2.0.0 --services eveai_api,eveai_workers"
|
||||
echo " $0 v1.0.0-beta --dry-run"
|
||||
}
|
||||
|
||||
# Check if version is provided
|
||||
if [ $# -eq 0 ]; then
|
||||
echo "❌ Error: Version is required"
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
VERSION=$1
|
||||
shift
|
||||
|
||||
# Default values
|
||||
SERVICES=""
|
||||
DRY_RUN=false
|
||||
FORCE=false
|
||||
|
||||
# Parse options
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--services)
|
||||
SERVICES="$2"
|
||||
shift 2
|
||||
;;
|
||||
--dry-run)
|
||||
DRY_RUN=true
|
||||
shift
|
||||
;;
|
||||
--force)
|
||||
FORCE=true
|
||||
shift
|
||||
;;
|
||||
-*)
|
||||
echo "❌ Unknown option: $1"
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
*)
|
||||
echo "❌ Unexpected argument: $1"
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Validate version format (flexible semantic versioning)
|
||||
if [[ ! "$VERSION" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9\-]+)?$ ]]; then
|
||||
echo "❌ Error: Invalid version format. Expected format: v1.2.3 or v1.2.3-alpha"
|
||||
echo " Examples: v1.0.0, v2.1.3-beta, v1.0.0-rc1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Ensure version starts with 'v'
|
||||
if [[ ! "$VERSION" =~ ^v ]]; then
|
||||
VERSION="v$VERSION"
|
||||
fi
|
||||
|
||||
# Local registry configuration
|
||||
REGISTRY="registry.ask-eve-ai-local.com"
|
||||
ACCOUNT="josakola"
|
||||
|
||||
# Check if podman is available
|
||||
if ! command -v podman &> /dev/null; then
|
||||
echo "❌ Error: podman not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if yq is available
|
||||
if ! command -v yq &> /dev/null; then
|
||||
echo "❌ Error: yq not found (required for parsing compose file)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if compose file exists
|
||||
COMPOSE_FILE="compose_dev.yaml"
|
||||
if [[ ! -f "$COMPOSE_FILE" ]]; then
|
||||
echo "❌ Error: Compose file '$COMPOSE_FILE' not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🏷️ EveAI Registry Version Tagging Script"
|
||||
echo "📦 Version: $VERSION"
|
||||
echo "🏪 Registry: $REGISTRY"
|
||||
echo "👤 Account: $ACCOUNT"
|
||||
|
||||
# Get services to process
|
||||
if [[ -n "$SERVICES" ]]; then
|
||||
# Convert comma-separated list to array
|
||||
IFS=',' read -ra SERVICE_ARRAY <<< "$SERVICES"
|
||||
else
|
||||
# Get all EveAI services (excluding nginx as per requirements)
|
||||
SERVICE_ARRAY=()
|
||||
while IFS= read -r line; do
|
||||
SERVICE_ARRAY+=("$line")
|
||||
done < <(yq e '.services | keys | .[]' "$COMPOSE_FILE" | grep -E '^eveai_')
|
||||
fi
|
||||
|
||||
echo "🔍 Services to process: ${SERVICE_ARRAY[*]}"
|
||||
|
||||
# Function to check if image exists in registry
|
||||
check_image_exists() {
|
||||
local image_name="$1"
|
||||
if podman image exists "$image_name" 2>/dev/null; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to check if version tag already exists
|
||||
check_version_exists() {
|
||||
local service="$1"
|
||||
local version_tag="$REGISTRY/$ACCOUNT/$service:$VERSION"
|
||||
|
||||
# Try to inspect the image in the registry
|
||||
if podman image exists "$version_tag" 2>/dev/null; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Process each service
|
||||
PROCESSED_SERVICES=()
|
||||
FAILED_SERVICES=()
|
||||
|
||||
for SERVICE in "${SERVICE_ARRAY[@]}"; do
|
||||
echo ""
|
||||
echo "🔄 Processing service: $SERVICE"
|
||||
|
||||
# Check if service exists in compose file
|
||||
if ! yq e ".services.$SERVICE" "$COMPOSE_FILE" | grep -q "image:"; then
|
||||
echo "⚠️ Warning: Service '$SERVICE' not found in $COMPOSE_FILE, skipping"
|
||||
continue
|
||||
fi
|
||||
|
||||
# Construct image names
|
||||
LATEST_IMAGE="$REGISTRY/$ACCOUNT/$SERVICE:latest"
|
||||
VERSION_IMAGE="$REGISTRY/$ACCOUNT/$SERVICE:$VERSION"
|
||||
|
||||
echo " 📥 Source: $LATEST_IMAGE"
|
||||
echo " 🏷️ Target: $VERSION_IMAGE"
|
||||
|
||||
# Check if version already exists
|
||||
if check_version_exists "$SERVICE" && [[ "$FORCE" != true ]]; then
|
||||
echo " ⚠️ Version $VERSION already exists for $SERVICE"
|
||||
echo " 💡 Use --force to overwrite existing tags"
|
||||
continue
|
||||
fi
|
||||
|
||||
if [[ "$DRY_RUN" == true ]]; then
|
||||
echo " 🔍 [DRY RUN] Would tag $LATEST_IMAGE as $VERSION_IMAGE"
|
||||
PROCESSED_SERVICES+=("$SERVICE")
|
||||
continue
|
||||
fi
|
||||
|
||||
# Check if latest image exists
|
||||
if ! check_image_exists "$LATEST_IMAGE"; then
|
||||
echo " ❌ Latest image not found: $LATEST_IMAGE"
|
||||
echo " 💡 Run build_and_push_eveai.sh first to create latest images"
|
||||
FAILED_SERVICES+=("$SERVICE")
|
||||
continue
|
||||
fi
|
||||
|
||||
# Pull latest image
|
||||
echo " 📥 Pulling latest image..."
|
||||
if ! podman pull "$LATEST_IMAGE"; then
|
||||
echo " ❌ Failed to pull $LATEST_IMAGE"
|
||||
FAILED_SERVICES+=("$SERVICE")
|
||||
continue
|
||||
fi
|
||||
|
||||
# Tag with version
|
||||
echo " 🏷️ Tagging with version $VERSION..."
|
||||
if ! podman tag "$LATEST_IMAGE" "$VERSION_IMAGE"; then
|
||||
echo " ❌ Failed to tag $LATEST_IMAGE as $VERSION_IMAGE"
|
||||
FAILED_SERVICES+=("$SERVICE")
|
||||
continue
|
||||
fi
|
||||
|
||||
# Push version tag to registry
|
||||
echo " 📤 Pushing version tag to registry..."
|
||||
if ! podman push "$VERSION_IMAGE"; then
|
||||
echo " ❌ Failed to push $VERSION_IMAGE"
|
||||
FAILED_SERVICES+=("$SERVICE")
|
||||
continue
|
||||
fi
|
||||
|
||||
echo " ✅ Successfully tagged $SERVICE with version $VERSION"
|
||||
PROCESSED_SERVICES+=("$SERVICE")
|
||||
done
|
||||
|
||||
# Summary
|
||||
echo ""
|
||||
echo "📊 Summary:"
|
||||
echo "✅ Successfully processed: ${#PROCESSED_SERVICES[@]} services"
|
||||
if [[ ${#PROCESSED_SERVICES[@]} -gt 0 ]]; then
|
||||
printf " - %s\n" "${PROCESSED_SERVICES[@]}"
|
||||
fi
|
||||
|
||||
if [[ ${#FAILED_SERVICES[@]} -gt 0 ]]; then
|
||||
echo "❌ Failed: ${#FAILED_SERVICES[@]} services"
|
||||
printf " - %s\n" "${FAILED_SERVICES[@]}"
|
||||
fi
|
||||
|
||||
if [[ "$DRY_RUN" == true ]]; then
|
||||
echo "🔍 This was a dry run - no actual changes were made"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
if [[ ${#FAILED_SERVICES[@]} -eq 0 ]]; then
|
||||
echo "🎉 All services successfully tagged with version $VERSION!"
|
||||
echo "📦 Images are available in registry: $REGISTRY/$ACCOUNT/[service]:$VERSION"
|
||||
else
|
||||
echo "⚠️ Some services failed to process. Check the errors above."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🕐 Finished at $(date +"%d/%m/%Y %H:%M:%S")"
|
||||
159
documentation/Production Setup/cluster-install.md
Normal file
@@ -0,0 +1,159 @@
# Cluster Install

## Phase 1: Ingress Controller Setup
### Step 1: Install the NGINX Ingress Controller

```
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.2/deploy/static/provider/cloud/deploy.yaml
```

### Step 2: Verify the Installation
Check whether the namespace has been created:

```
kubectl get namespaces | grep ingress-nginx
```

Check whether the pods are starting:

```
kubectl get pods -n ingress-nginx
```

Check the services (this is the most important part!):

```
kubectl get services -n ingress-nginx
```

You should see something like this:

```
NAME                       TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx-controller   NodePort   10.43.xxx.xxx   <none>        80:30080/TCP,443:30443/TCP   1m
```

Watch the pods until they are ready:

```
kubectl get pods -n ingress-nginx -w
```

Stop with Ctrl+C once you see this:

```
ingress-nginx-controller-xxx   1/1   Running   0   2m
```

Check the NodePorts; this is crucial for your Scaleway LoadBalancer configuration:

```
kubectl get service ingress-nginx-controller -n ingress-nginx -o yaml | grep nodePort
```

Or, for a clearer overview:

```
kubectl describe service ingress-nginx-controller -n ingress-nginx
```

You are looking for something like:

```
HTTP: Port 80 → NodePort 30080 (or another high number)
HTTPS: Port 443 → NodePort 30443 (or another high number)
```
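
If you prefer not to grep through the YAML, the same NodePorts can be read directly with a jsonpath query (a minimal sketch, assuming the default service and port names):

```
kubectl get service ingress-nginx-controller -n ingress-nginx \
  -o jsonpath='{range .spec.ports[*]}{.name}{" -> "}{.nodePort}{"\n"}{end}'
```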

### Step 3: Check the Scaleway load balancer
Normally a load balancer has been created automatically. Check that this is the case. It is configured correctly automatically and cannot be modified.

### Step 4: Verify the firewall rules

- In the console, go to Compute - CPU & GPU Instances
- Go to the Security Groups tab
- Click the security group for your cluster (Kapsule Default Security Group)
- Go to the Rules tab, check whether the port (3xxxx) has been added to the firewall rules, and add it if it is not there yet
- Set this up for the full IPv4 range first

### Step 5: Test the Basic Setup
Test whether the ingress controller is reachable internally (replace the IP and NodePort with your own):

```
kubectl run test-pod --image=curlimages/curl -it --rm -- curl -H "Host: evie.askeveai.com" http://172.16.16.5:31127
```

You should get a 404 response (that is good! It means nginx is running).

Test whether the ingress controller is reachable externally (adjust the IP):

```
curl -H "Host: evie.askeveai.com" http://51.159.204.52
```

## Phase 2: Deploy the test application

We have integrated a small test application in staging-test-setup.yaml. Install it with:

```
kubectl apply -f staging-test-setup.yaml
```

And check with:

```
curl -H "Host: evie-staging.askeveai.com" http://51.159.204.52/verify/
```

### Extending Later
When you deploy the real services, uncomment the relevant ingress paths and deploy the corresponding services. The verify service remains available for debugging.
This setup gives you a professional staging environment with built-in monitoring and debug capabilities.

## Phase 3: Configure DNS
Create the correct A record in the DNS zone. It must point to the public IP of the load balancer.

You can test with:

```
curl http://evie-staging.askeveai.com/verify/
```
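
Before testing over HTTP, it helps to confirm that the A record has propagated (a minimal sketch; replace the hostname and expected IP with your own):

```
dig +short evie-staging.askeveai.com
# should print the public IP of the load balancer, e.g. 51.159.204.52
```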

In the browser this will probably not work yet, because the site is not yet secured with SSL.

## Phase 4: Bunny CDN Setup
First make sure Bunny is ready to go:

- Create a Pull Zone - evie-staging
- Origin = http://[IP of the load balancer]
- Host header = evie-staging.askeveai.com
- Force SSL - On

Then change the A record in the DNS zone (probably: remove it and add a CNAME record instead).
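
Once the CNAME points to the pull zone, you can check that requests are actually served through Bunny by inspecting the response headers (Bunny typically identifies itself via `Server`/`CDN-*` headers, although the exact header set may vary):

```
curl -sI http://evie-staging.askeveai.com/verify/ | grep -iE '^(server|cdn-)'
```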

## Phase 5: Introducing Secure Communication

### Installing the SSL certificate in the Bunny pull zone
- Add a hostname to the Bunny pull zone (evie-staging.askeveai.com)
- Add an SSL certificate to the Bunny pull zone (just follow the instructions)
- Enable Force SSL

You can check with:

```
curl https://evie-staging.askeveai.com/verify/
```

### Installing cert-manager in the cluster

```
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.crds.yaml
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.yaml
```

Then apply the cert-manager-setup.yaml manifest (make sure the email address and domain are correct):

```
kubectl apply -f cert-manager-setup.yaml
```
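
To confirm that the issuers are registered and the certificate was actually issued, a few read-only checks help (a sketch; the namespace and secret name come from the manifests in this repo):

```
kubectl get clusterissuers
kubectl get certificate -n eveai-staging
kubectl describe certificate evie-staging-tls -n eveai-staging
# the Ready condition should become True once Let's Encrypt has issued the certificate
```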
461
documentation/Production Setup/eveai_architecture.md
Normal file
@@ -0,0 +1,461 @@
# EveAI Cloud Architecture

## Overview
The EveAI application runs on a modern cloud-native architecture with Kubernetes on Scaleway, protected by the Bunny.net CDN and supported by several managed services.

## Architecture Diagram (Recommended Setup)

```
Internet
↓
DNS (askeveai.com - all subdomains)
↓
Bunny.net CDN (Multi-domain setup)
├─ askeveai.com → WordPress Hosting -> Scaleway hosting (for now only via plugin)
├─ evie-staging.askeveai.com → Scaleway LB → Staging Cluster
└─ evie.askeveai.com → Scaleway LB → Production Cluster
↓
Scaleway Load Balancer (Static IP)
↓
Kubernetes Cluster (Scaleway)
↓
Ingress Controller
↓
┌─────────────────────────────────────┐
│            Applications             │
├─────────────────────────────────────┤
│ • eveai_app (staging/production)    │
│ • eveai_api (staging/production)    │
│ • eveai_workers (staging/production)│
│ • [other pods]                      │
└─────────────────────────────────────┘
↓
┌─────────────────────────────────────┐
│          Managed Services           │
├─────────────────────────────────────┤
│ • Redis (per environment)           │
│ • PostgreSQL (per environment)      │
│ • Object Storage (S3/Minio)         │
└─────────────────────────────────────┘
```

## Components

### 1. CDN & Security Layer
**Bunny.net CDN**
- **Function**: Content delivery network and security gateway
- **Benefits**:
  - DDoS protection and attack mitigation
  - Caching of static files
  - Offloading of the backend cluster
  - Improved loading times for end users
  - Web Application Firewall functionality

### 2. DNS & Multi-Domain Routing

**DNS Provider: EuroDNS**
- **Hosting**: hosting.com (WordPress hosting only)
- **Email**: ProtonMail (via domain records)
- **Application**: Scaleway cluster

**Bunny.net Pull Zone Setup**
- **Zone 1**: `askeveai.com` → Origin: hosting.com WordPress
- **Zone 2**: `evie-staging.askeveai.com` → Origin: Scaleway LB IP
- **Zone 3**: `evie.askeveai.com` → Origin: Scaleway LB IP

**DNS Records (EuroDNS) - Extended**
```
; Web traffic via Bunny.net
A      askeveai.com                        → Scaleway hosting IP
A      evie-staging.askeveai.com           → Bunny.net IP
A      evie.askeveai.com                   → Bunny.net IP
A      static.askeveai.com                 → Bunny.net IP (for static assets)

; Email records (ProtonMail) - stay direct
MX     askeveai.com                        → mail.protonmail.ch (priority 10)
MX     askeveai.com                        → mailsec.protonmail.ch (priority 20)
TXT    askeveai.com                        → "v=spf1 include:_spf.protonmail.ch ~all"
TXT    protonmail._domainkey.askeveai.com  → [DKIM key from ProtonMail]
TXT    _dmarc.askeveai.com                 → "v=DMARC1; p=quarantine; rua=..."

; Subdomains for email (if needed)
CNAME  autodiscover.askeveai.com           → autodiscover.protonmail.ch
CNAME  autoconfig.askeveai.com             → autoconfig.protonmail.ch
```

### 3. Infrastructure Layer
**Scaleway Load Balancer**
- **Type**: Static external IP address
- **Function**: Entry point to the Kubernetes cluster
- **Location**: In front of the cluster, distributes traffic to the Ingress

**Kubernetes Cluster (Scaleway)**
- **Ingress Controller**: Routes requests to the right services
- **Workloads**:
  - `eveai_app`: Frontend application
  - `eveai_api`: Backend API services
  - `eveai_workers`: Background processing
  - Additional application pods

### 4. Monitoring & Observability
**Prometheus Stack (In-cluster)**
- **Function**: Business event monitoring
- **Scope**: Application-specific metrics and events

**Scaleway Cockpit**
- **Function**: Infrastructure monitoring
- **Scope**: Performance and infrastructure components

### 5. Managed Services
**Redis (Scaleway Managed)**
- **Function**: Caching layer
- **Benefit**: Reduced latency, session storage

**PostgreSQL (Scaleway Managed)**
- **Function**: Primary database
- **Benefit**: Managed backups, high availability

**Object Storage (Scaleway)**
- **Interface**: S3-compatible via the Minio client
- **Function**: File storage, static assets, backups

## Architecture Considerations

### Current Setup Evaluation

**Strengths:**
- ✅ Good separation of concerns
- ✅ Use of managed services reduces operational overhead
- ✅ CDN for performance and security
- ✅ Container-native with Kubernetes
- ✅ Comprehensive monitoring setup

**Potential Improvements:**
- ✅ **Multi-domain setup via Bunny.net**: All traffic via the CDN
- ✅ **Environment isolation**: Separate origins for staging/production
- 🤔 **Origin Protection**: Firewall rules to prevent direct access
- 🤔 **Kubernetes Ingress**: Configure host-based routing for multiple environments

## Email & DNS Considerations

### Email via ProtonMail (Stays Direct)
**Important note**: Email records do **NOT** go through Bunny.net. CDNs are for web traffic (HTTP/HTTPS) only. Email uses other protocols (SMTP, IMAP, POP3) that cannot pass through a CDN.

**What stays the same:**
- MX records keep pointing to the ProtonMail servers
- SPF, DKIM and DMARC records remain unchanged
- Email functionality is not affected by Bunny.net

**Advantage of this setup:**
- DNS at EuroDNS: flexible record management
- Hosting at hosting.com: easy to migrate later
- Email at ProtonMail: stays stable during migrations

### DNS Migration Strategy (Simplified)

**Current situation:**
```
EuroDNS → hosting.com (WordPress + email config via cPanel)
```

**New situation:**
```
EuroDNS → Bunny.net (web) + ProtonMail (email direct)
```

**Migration steps:**
1. **Preparation**: Move the email records from cPanel to EuroDNS
2. **Bunny.net setup**: Configure the pull zones
3. **DNS switch**: A records to Bunny.net, MX records directly to ProtonMail
4. **Later**: Cancel hosting.com

## Bunny.net Setup Guide

### Step 1: Create Pull Zones

**Pull Zone 1: WordPress Site**
```
Name: askeveai-wordpress
Hostname: askeveai.com
Origin URL: [hosting.com server IP/URL]
```

**Pull Zone 2: Staging Environment**
```
Name: evie-staging
Hostname: evie-staging.askeveai.com
Origin URL: http://[scaleway-lb-ip]
Host Header: evie-staging.askeveai.com
```

**Pull Zone 3: Production Environment**
```
Name: evie-production
Hostname: evie.askeveai.com
Origin URL: http://[scaleway-lb-ip]
Host Header: evie.askeveai.com
```

**Pull Zone 4: Static Assets - Bunny Storage (Recommended)**
```
Name: static-assets
Type: Push Zone (Bunny Storage)
Hostname: static.askeveai.com
Storage: Direct upload to Bunny Storage
API: FTP/SFTP/REST API upload
```

**Alternative: Pull Zone from Scaleway S3**
```
Name: static-assets-s3
Type: Pull Zone
Hostname: static.askeveai.com
Origin URL: https://[scaleway-s3-bucket].s3.fr-par.scw.cloud
```

### Step 2: SSL/TLS Configuration
- **Force SSL**: On for all zones
- **SSL Certificate**: Let's Encrypt (free) or Bunny.net certificates
- **Origin Shield**: Europe (for better performance towards Scaleway)

### Step 3: Security Settings
- **Origin Shield Protection**: Only Bunny.net IPs can reach the origin
- **WAF Rules**: Basic DDoS and attack protection
- **Rate Limiting**: Configure per domain/endpoint

## Static Assets Optimization

### Current Approach (Sub-optimal)
```
Browser → Bunny.net → Scaleway LB → Ingress → App Pod → Static file
```

### Recommended Approach: Direct Static Delivery
```
Browser → Bunny.net Edge → Static file (cached at the edge)
```

### Implementation Strategies

**Option 1: Bunny Storage (Recommended)**
```
Build Process → Bunny Storage → Bunny CDN Edge → Browser
- Upload: directly to Bunny Storage via API/FTP
- Serve: native performance, no extra hops
- Cost: usually cheaper than S3 + CDN
- Speed: optimal, storage and CDN integrated
```
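
To illustrate the upload step above, a build pipeline could push an asset to Bunny Storage over its HTTP API (a hedged sketch, not verified against the current Bunny documentation; the storage zone name, file path and `BUNNY_STORAGE_KEY` variable are placeholders):

```
curl -X PUT \
  -H "AccessKey: $BUNNY_STORAGE_KEY" \
  --data-binary @dist/app.min.js \
  "https://storage.bunnycdn.com/static-assets/js/app.min.js"
```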

**Option 2: Scaleway Object Storage + Pull Zone**
```
Build Process → Scaleway S3 → Bunny Pull Zone → Browser
- Upload: App → Scaleway S3 bucket
- Serve: Bunny.net caches the S3 bucket
- Advantage: backup in your own cloud, data sovereignty
- Disadvantage: extra latency for the first request
```

**Option 3: Hybrid Approach**
```
- Critical assets: Bunny Storage (logo, CSS, JS)
- User uploads: Scaleway S3 → Bunny Pull Zone
- Development: Local static serving
```

### Bunny Storage vs Scaleway S3

| Aspect | Bunny Storage | Scaleway S3 + Pull Zone |
|--------|---------------|-------------------------|
| **Performance** | ⭐⭐⭐⭐⭐ Native CDN | ⭐⭐⭐⭐ Extra hop |
| **Cost** | ⭐⭐⭐⭐⭐ Integrated pricing | ⭐⭐⭐ S3 + CDN costs |
| **Simplicity** | ⭐⭐⭐⭐⭐ One provider | ⭐⭐⭐ Two systems |
| **Data Control** | ⭐⭐⭐ At Bunny | ⭐⭐⭐⭐⭐ In your cloud |
| **Backup/Sync** | ⭐⭐⭐ Bunny dependent | ⭐⭐⭐⭐⭐ Full control |

### File Types for Static Delivery
**Ideal for the CDN:**
- ✅ Images (JPG, PNG, WebP, SVG)
- ✅ CSS files
- ✅ JavaScript bundles
- ✅ Fonts (WOFF2, etc.)
- ✅ Videos/audio files
- ✅ PDF documents
- ✅ Icons and favicons

**Stay with the app:**
- ❌ Dynamic API responses
- ❌ User-generated content (unless via the upload flow)
- ❌ Authentication-required files

## Kubernetes Ingress Configuration

With the multi-domain setup via Bunny.net, your Ingress needs to be adjusted as well:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: eveai-ingress
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "false"  # SSL handled by Bunny.net
spec:
  rules:
    - host: evie-staging.askeveai.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: eveai-staging-service
                port:
                  number: 80
    - host: evie.askeveai.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: eveai-production-service
                port:
                  number: 80
```

## Migration Strategy (Extended)

### Phase 1: Bunny.net Setup (No downtime)
1. Create the pull zones in Bunny.net
2. Test via the Bunny.net hostnames (without any DNS change; see the sketch below)
3. Configure caching and security rules
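
Bunny normally assigns each pull zone a default hostname of the form `<zone-name>.b-cdn.net`. Assuming that default, the staging zone can be smoke-tested before touching DNS (the exact hostname is an assumption based on the zone name above):

```
curl -sI https://evie-staging.b-cdn.net/verify/
```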

### Phase 2: DNS Migration (Minimal downtime)
1. Copy the email records from cPanel to EuroDNS
2. Lower the TTL of the current DNS records (one hour in advance; see the TTL check below)
3. Change the A records to Bunny.net (the MX records stay at ProtonMail)
4. Monitor traffic and performance
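
A quick way to read the current TTL of a record before and after lowering it (a small sketch; any public resolver works):

```
dig +noall +answer askeveai.com A
# the second column is the remaining TTL in seconds
```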

### Phase 3: Origin Protection
1. Configure the Scaleway firewall to allow only Bunny.net IPs (see the sketch below)
2. Test all functionality
3. Monitor the security logs
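
Building those firewall rules requires the list of Bunny edge IPs. Bunny publishes this list via a public endpoint (a sketch; the endpoint is an assumption, so verify it against the Bunny documentation before relying on it):

```
curl -s https://bunnycdn.com/api/system/edgeserverlist | jq -r '.[]'
```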

### Phase 4: WordPress Migration to Scaleway (Optional)
**Planning considerations:**
- **Database**: WordPress DB to Scaleway PostgreSQL or a separate MySQL
- **Files**: wp-content to Scaleway Object Storage
- **SSL**: Stays via Bunny.net (no changes)
- **Performance**: Possibly faster thanks to proximity to EveAI

**Migration options:**
1. **Lift & Shift**: VM on Scaleway with a traditional LAMP stack
2. **Modernization**: WordPress in a Kubernetes container
3. **Hybrid**: Keep hosting.com until you are happy with the K8s setup

### Phase 5: Cancelling hosting.com
1. Confirm that WordPress runs 100% on Scaleway
2. Take a final backup of hosting.com
3. Cancel the hosting.com contract
4. Email and EveAI keep running undisturbed

## Future Evolution: WordPress on Scaleway

### Option 1: WordPress as a Managed Service
**Scaleway WordPress Hosting** (if available)
- Managed WordPress environment
- Automatic updates and backups
- Integrated with other Scaleway services

### Option 2: WordPress in the Kubernetes Cluster
**Advantages:**
- ✅ Everything on one platform (Scaleway)
- ✅ Shared resources and monitoring
- ✅ Consistent deployment pipeline
- ✅ Cost optimization
- ✅ Uniform backup/disaster recovery

**WordPress in K8s Setup:**
```yaml
# WordPress Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: wordpress-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: wordpress
  template:
    metadata:
      labels:
        app: wordpress   # pod labels must match the selector above
    spec:
      containers:
        - name: wordpress
          image: wordpress:6-apache
          env:
            - name: WORDPRESS_DB_HOST
              value: [scaleway-postgresql-endpoint]
            - name: WORDPRESS_DB_NAME
              value: wordpress_db
          volumeMounts:
            - name: wordpress-storage
              mountPath: /var/www/html/wp-content
      volumes:
        - name: wordpress-storage
          persistentVolumeClaim:
            claimName: wordpress-pvc
```

### Option 3: WordPress on Scaleway Instances
**Instance-based hosting:**
- Dedicated VM for WordPress
- More control over the environment
- Traditional hosting approach on a modern cloud

### Recommended Approach: Kubernetes
**The architecture would become:**
```
Bunny.net CDN
├─ askeveai.com → Scaleway LB → WordPress Pod
├─ evie-staging.askeveai.com → Scaleway LB → EveAI Staging
└─ evie.askeveai.com → Scaleway LB → EveAI Production
```

**Shared Resources:**
- **PostgreSQL**: Separate database for WordPress + EveAI
- **Object Storage**: WordPress media + EveAI assets
- **Redis**: WordPress caching + EveAI caching
- **Monitoring**: Unified observability for everything

## Disaster Recovery & Backup

- **Database**: Managed PostgreSQL automated backups
- **Object Storage**: Consider cross-region replication
- **Application State**: Stateless design where possible
- **Configuration**: GitOps approach for cluster configuration

## Conclusion

The proposed architecture offers an excellent balance between performance, security and operational simplicity. By routing everything through Bunny.net you get:

**Immediate benefits:**
- Uniform security and performance for all domains
- Simple SSL management
- A cost-effective CDN for all content
- Flexibility for future migrations

**Strategic benefits:**
- **Scaleway consolidation**: Option to migrate WordPress to Scaleway as well
- **Operational simplicity**: One cloud provider for the application infrastructure
- **Cost optimization**: Shared resources and bundling advantages
- **Technical consistency**: Uniform tooling and monitoring

**Recommended roadmap:**
1. **Now**: Roll out Bunny.net for all domains
2. **Q1 2026**: Evaluate migrating WordPress to Scaleway
3. **Q2 2026**: Terminate the hosting.com contract
4. **Result**: A fully cloud-native platform on Scaleway + Bunny.net

This approach maximizes flexibility while minimizing risk through a phased implementation.

---
*Architecture document generated: August 2025*
@@ -0,0 +1,104 @@
|
||||
graph TB
|
||||
%% External Users
|
||||
Users[👥 Users] --> Internet[🌐 Internet]
|
||||
|
||||
%% DNS Layer
|
||||
Internet --> EuroDNS[📡 EuroDNS<br/>askeveai.com]
|
||||
|
||||
%% Email Flow (Direct)
|
||||
EuroDNS --> ProtonMail[📧 ProtonMail<br/>MX Records]
|
||||
|
||||
%% Web Traffic via Bunny.net
|
||||
EuroDNS --> BunnyNet[🐰 Bunny.net CDN]
|
||||
|
||||
%% Bunny.net Pull Zones + Storage
|
||||
BunnyNet --> WP_Zone[📝 WordPress Zone<br/>askeveai.com]
|
||||
BunnyNet --> Staging_Zone[🧪 Staging Zone<br/>evie-staging.askeveai.com]
|
||||
BunnyNet --> Prod_Zone[🚀 Production Zone<br/>evie.askeveai.com]
|
||||
BunnyNet --> Static_Zone[📦 Static Assets Zone<br/>static.askeveai.com]
|
||||
BunnyNet --> BunnyStorage[🗂️ Bunny Storage<br/>Static Files]
|
||||
|
||||
%% WordPress Origin
|
||||
WP_Zone --> HostingCom[🏠 hosting.com<br/>WordPress Site]
|
||||
|
||||
%% Scaleway Infrastructure
|
||||
subgraph Scaleway["☁️ Scaleway Cloud Platform"]
|
||||
|
||||
%% Load Balancer
|
||||
ScalewayLB[⚖️ Load Balancer<br/>Static IP]
|
||||
|
||||
%% Kubernetes Cluster
|
||||
subgraph K8sCluster["🐳 Kubernetes Cluster"]
|
||||
Ingress[🚪 Ingress Controller<br/>Host-based Routing]
|
||||
|
||||
%% Application Pods
|
||||
subgraph AppPods["📱 Application Pods"]
|
||||
EveAI_App[evie_app<br/>Frontend]
|
||||
EveAI_API[evie_api<br/>Backend API]
|
||||
EveAI_Workers[evie_workers<br/>Background Jobs]
|
||||
Other_Pods[... other pods]
|
||||
end
|
||||
|
||||
%% Monitoring
|
||||
subgraph Monitoring["📊 Monitoring"]
|
||||
Prometheus[🔥 Prometheus<br/>Business Events]
|
||||
Grafana[📈 Grafana<br/>Dashboards]
|
||||
end
|
||||
end
|
||||
|
||||
%% Managed Services
|
||||
subgraph ManagedServices["🛠️ Managed Services"]
|
||||
Redis[🔴 Redis<br/>Caching Layer]
|
||||
PostgreSQL[🐘 PostgreSQL<br/>Database]
|
||||
ObjectStorage[📂 Object Storage<br/>S3 Compatible]
|
||||
end
|
||||
|
||||
%% Cockpit Monitoring
|
||||
Cockpit[🚁 Scaleway Cockpit<br/>Infrastructure Monitoring]
|
||||
end
|
||||
|
||||
%% Connections to Scaleway
|
||||
Staging_Zone --> ScalewayLB
|
||||
Prod_Zone --> ScalewayLB
|
||||
Static_Zone --> BunnyStorage
|
||||
|
||||
%% Internal Scaleway Connections
|
||||
ScalewayLB --> Ingress
|
||||
|
||||
Ingress --> EveAI_App
|
||||
Ingress --> EveAI_API
|
||||
Ingress --> EveAI_Workers
|
||||
Ingress --> Other_Pods
|
||||
|
||||
EveAI_App --> Redis
|
||||
EveAI_API --> PostgreSQL
|
||||
EveAI_API --> Redis
|
||||
EveAI_Workers --> PostgreSQL
|
||||
EveAI_Workers --> Redis
|
||||
EveAI_API --> ObjectStorage
|
||||
|
||||
%% Monitoring Connections
|
||||
EveAI_App --> Prometheus
|
||||
EveAI_API --> Prometheus
|
||||
EveAI_Workers --> Prometheus
|
||||
Prometheus --> Grafana
|
||||
|
||||
%% Cockpit monitors everything
|
||||
ScalewayLB --> Cockpit
|
||||
K8sCluster --> Cockpit
|
||||
ManagedServices --> Cockpit
|
||||
|
||||
%% Styling
|
||||
classDef bunnynet fill:#ff6b35,stroke:#333,stroke-width:2px,color:#fff
|
||||
classDef scaleway fill:#4c1d95,stroke:#333,stroke-width:2px,color:#fff
|
||||
classDef external fill:#10b981,stroke:#333,stroke-width:2px,color:#fff
|
||||
classDef monitoring fill:#f59e0b,stroke:#333,stroke-width:2px,color:#fff
|
||||
classDef managed fill:#8b5cf6,stroke:#333,stroke-width:2px,color:#fff
|
||||
classDef apps fill:#06b6d4,stroke:#333,stroke-width:2px,color:#fff
|
||||
|
||||
class BunnyNet,WP_Zone,Staging_Zone,Prod_Zone,Static_Zone,BunnyStorage bunnynet
|
||||
class EuroDNS,ProtonMail,HostingCom,Users,Internet external
|
||||
class ScalewayLB,Ingress,Cockpit scaleway
|
||||
class Prometheus,Grafana monitoring
|
||||
class Redis,PostgreSQL,ObjectStorage managed
|
||||
class EveAI_App,EveAI_API,EveAI_Workers,Other_Pods apps
|
||||
@@ -59,11 +59,7 @@ data:
|
||||
|
||||
# API Keys (base64 encoded)
|
||||
OPENAI_API_KEY: c2stcHJvai04UjBqV3p3akw3UGVvUHlNaEpUWlQzQmxia0ZKTGI2SGZSR0JIcjljRVZGV0VoVTc=
|
||||
GROQ_API_KEY: Z3NrX0dIZlRkcFlwbmFTS1pGSklzSlJBV0dkeWIzRlkzNWN2RjZBTHBMVThEYzR0SUZMVWZRNA==
|
||||
MISTRAL_API_KEY: MGY0WmlRMWtJcGdJS1RIWDhkMGE4R09EMnZBZ1ZxRW4=
|
||||
ANTHROPIC_API_KEY: c2stYW50LWFwaTAzLWMyVG1remJSZWVHaFhCTzVKeE5INkJKTnlsUkRvbmM5R21aZDBIZRbrvVyeWVrZWMyVHJ2eWVrZWMyVGpOeWVrZWMybYk95Z1k=
|
||||
LANGCHAIN_API_KEY: bHN2Ml9za180ZmViMWU2MDVlNzA0MGFlYjM1N2M1OTAyNWZiZWEzMl9jNWU4NWVjNDEx
|
||||
SERPER_API_KEY: ZTRjNTUzODU2ZDBlNmI1YTE3MWVjNWU2YjY5ZDg3NDI4NWI5YmFkZg==
|
||||
|
||||
# Application secrets
|
||||
SECRET_KEY: OTc4NjdjMTQ5MWJlYTVlZTZhOGU4NDM2ZWIxMWJmMmJhNmE2OWZmNTNhYjFiMTdlY2JhNDUwZDBmMmU1NzJlMQ==
|
||||
|
||||
7
scaleway/configs/production.conf
Normal file
@@ -0,0 +1,7 @@
|
||||
# Scaleway Production Environment Configuration (for later)
|
||||
SCALEWAY_REGISTRY="rg.fr-par.scw.cloud/eveai-production"
|
||||
SCALEWAY_API_KEY="your_production_api_key_here"
|
||||
SCALEWAY_REGION="fr-par"
|
||||
SCALEWAY_PROJECT_ID="production_project_id"
|
||||
K8S_CLUSTER_NAME="eveai-production-cluster"
|
||||
K8S_CONTEXT="scaleway-production"
|
||||
7
scaleway/configs/staging.conf
Normal file
@@ -0,0 +1,7 @@
|
||||
# Scaleway Staging Environment Configuration
|
||||
SCALEWAY_REGISTRY="rg.fr-par.scw.cloud/eveai-staging"
|
||||
SCALEWAY_API_KEY="7cd4b2c4-448b-4a88-9119-0eb7f192c137"
|
||||
SCALEWAY_REGION="fr-par"
|
||||
SCALEWAY_PROJECT_ID="ad7d2ed9-252b-4b2a-9f4c-daca3edc4c4b"
|
||||
K8S_CLUSTER_NAME="eveai-staging"
|
||||
K8S_CONTEXT="eveai-staging"
|
||||
88
scaleway/manifests/staging/cert-manager-setup.yaml
Normal file
@@ -0,0 +1,88 @@
|
||||
# cert-manager-setup.yaml
|
||||
# Install cert-manager for automatic SSL certificate management
|
||||
|
||||
# Install cert-manager CRDs first
|
||||
# kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.crds.yaml
|
||||
|
||||
# cert-manager namespace
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: cert-manager
|
||||
|
||||
---
|
||||
# ClusterIssuer for Let's Encrypt staging (test first)
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-staging
|
||||
spec:
|
||||
acme:
|
||||
server: https://acme-staging-v02.api.letsencrypt.org/directory
|
||||
email: pieter@askeveai.com # CHANGE THIS
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-staging
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: nginx
|
||||
|
||||
---
|
||||
# ClusterIssuer for Let's Encrypt production
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-prod
|
||||
spec:
|
||||
acme:
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
email: pieter@askeveai.com # CHANGE THIS
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-prod
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: nginx
|
||||
|
||||
---
|
||||
# Updated ingress with TLS configuration
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: eveai-staging-ingress-https
|
||||
namespace: eveai-staging
|
||||
labels:
|
||||
app: eveai
|
||||
environment: staging
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: nginx
|
||||
nginx.ingress.kubernetes.io/ssl-redirect: "true"
|
||||
nginx.ingress.kubernetes.io/proxy-body-size: "10m"
|
||||
nginx.ingress.kubernetes.io/proxy-read-timeout: "300"
|
||||
# Use staging issuer first for testing
|
||||
cert-manager.io/cluster-issuer: letsencrypt-staging
|
||||
# After verification, switch to: letsencrypt-prod
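# e.g. switch the live ingress to the production issuer without re-applying the manifest (sketch):
#   kubectl annotate ingress eveai-staging-ingress-https -n eveai-staging \
#     cert-manager.io/cluster-issuer=letsencrypt-prod --overwrite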
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
tls:
|
||||
- hosts:
|
||||
- evie-staging.askeveai.com
|
||||
secretName: evie-staging-tls
|
||||
rules:
|
||||
- host: evie-staging.askeveai.com
|
||||
http:
|
||||
paths:
|
||||
- path: /verify
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: verify-service
|
||||
port:
|
||||
number: 80
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: verify-service
|
||||
port:
|
||||
number: 80
|
||||
285
scaleway/manifests/staging/ingress-controller-setup.yaml
Normal file
@@ -0,0 +1,285 @@
|
||||
# ingress-controller-setup.yaml
|
||||
# NGINX Ingress Controller for use with an external LoadBalancer
|
||||
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
|
||||
---
|
||||
# Ingress Controller Deployment
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: ingress-nginx-controller
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
spec:
|
||||
serviceAccountName: ingress-nginx
|
||||
containers:
|
||||
- name: controller
|
||||
image: registry.k8s.io/ingress-nginx/controller:v1.8.2
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /wait-shutdown
|
||||
args:
|
||||
- /nginx-ingress-controller
|
||||
- --election-id=ingress-controller-leader
|
||||
- --controller-class=k8s.io/ingress-nginx
|
||||
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
|
||||
- --validating-webhook=:8443
|
||||
- --validating-webhook-certificate=/usr/local/certificates/cert
|
||||
- --validating-webhook-key=/usr/local/certificates/key
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
runAsUser: 101
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: LD_PRELOAD
|
||||
value: /usr/local/lib/libmimalloc.so
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
protocol: TCP
|
||||
- name: https
|
||||
containerPort: 443
|
||||
protocol: TCP
|
||||
- name: webhook
|
||||
containerPort: 8443
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 5
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10254
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
readinessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10254
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 90Mi
|
||||
volumeMounts:
|
||||
- mountPath: /usr/local/certificates/
|
||||
name: webhook-cert
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: webhook-cert
|
||||
secret:
|
||||
secretName: ingress-nginx-admission
|
||||
|
||||
---
|
||||
# NodePort Service - this is what your LoadBalancer points to!
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: ingress-nginx-controller
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
protocol: TCP
|
||||
name: http
|
||||
nodePort: 30080 # External LoadBalancer points to this port on every node
|
||||
- port: 443
|
||||
targetPort: 443
|
||||
protocol: TCP
|
||||
name: https
|
||||
nodePort: 30443 # For HTTPS traffic
|
||||
selector:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
|
||||
---
|
||||
# ServiceAccount and RBAC
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps", "endpoints", "nodes", "pods", "secrets", "namespaces"]
|
||||
verbs: ["list", "watch"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
resourceNames: ["ingress-nginx-controller"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch"]
|
||||
- apiGroups: ["networking.k8s.io"]
|
||||
resources: ["ingresses/status"]
|
||||
verbs: ["update"]
|
||||
- apiGroups: ["networking.k8s.io"]
|
||||
resources: ["ingressclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["discovery.k8s.io"]
|
||||
resources: ["endpointslices"]
|
||||
verbs: ["list", "watch", "get"]
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["namespaces"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps", "pods", "secrets", "endpoints"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["networking.k8s.io"]
|
||||
resources: ["ingressclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
resourceNames: ["ingress-controller-leader"]
|
||||
verbs: ["get", "update"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["create"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["create", "patch"]
|
||||
- apiGroups: ["discovery.k8s.io"]
|
||||
resources: ["endpointslices"]
|
||||
verbs: ["list", "watch", "get"]
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: ingress-nginx
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: ingress-nginx
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
|
||||
---
|
||||
# ConfigMap for Ingress Controller configuration
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: ingress-nginx-controller
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
data:
|
||||
allow-snippet-annotations: "true"
|
||||
|
||||
---
|
||||
# IngressClass definition
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: IngressClass
|
||||
metadata:
|
||||
name: nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
spec:
|
||||
controller: k8s.io/ingress-nginx
|
||||
502
scaleway/manifests/staging/staging-test-setup.yaml
Normal file
@@ -0,0 +1,502 @@
|
||||
# staging-test-setup.yaml
|
||||
# Complete test and debug setup for EveAI staging environment
|
||||
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: eveai-staging
|
||||
labels:
|
||||
environment: staging
|
||||
app: eveai
|
||||
|
||||
---
|
||||
# ConfigMap with HTML content for the test interface
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: verify-content
|
||||
namespace: eveai-staging
|
||||
data:
|
||||
index.html: |
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>EveAI Staging - System Verification</title>
|
||||
<style>
|
||||
* { box-sizing: border-box; margin: 0; padding: 0; }
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
|
||||
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
|
||||
min-height: 100vh;
|
||||
padding: 20px;
|
||||
}
|
||||
.container {
|
||||
max-width: 1200px;
|
||||
margin: 0 auto;
|
||||
background: white;
|
||||
border-radius: 12px;
|
||||
box-shadow: 0 20px 40px rgba(0,0,0,0.1);
|
||||
overflow: hidden;
|
||||
}
|
||||
.header {
|
||||
background: #2d3748;
|
||||
color: white;
|
||||
padding: 30px;
|
||||
text-align: center;
|
||||
}
|
||||
.header h1 { font-size: 2.5em; margin-bottom: 10px; }
|
||||
.subtitle { opacity: 0.8; font-size: 1.1em; }
|
||||
.content { padding: 40px; }
|
||||
.status-grid {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
|
||||
gap: 20px;
|
||||
margin-bottom: 40px;
|
||||
}
|
||||
.status-card {
|
||||
border: 1px solid #e2e8f0;
|
||||
border-radius: 8px;
|
||||
padding: 20px;
|
||||
background: #f8fafc;
|
||||
}
|
||||
.status-card h3 {
|
||||
color: #2d3748;
|
||||
margin-bottom: 15px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
.status-indicator {
|
||||
width: 12px;
|
||||
height: 12px;
|
||||
border-radius: 50%;
|
||||
margin-right: 10px;
|
||||
}
|
||||
.healthy { background: #48bb78; }
|
||||
.warning { background: #ed8936; }
|
||||
.info { background: #4299e1; }
|
||||
.debug-section {
|
||||
background: #1a202c;
|
||||
color: #e2e8f0;
|
||||
border-radius: 8px;
|
||||
padding: 30px;
|
||||
margin-top: 30px;
|
||||
}
|
||||
.debug-section h3 { color: #90cdf4; margin-bottom: 20px; }
|
||||
.endpoint {
|
||||
background: #2d3748;
|
||||
border-radius: 4px;
|
||||
padding: 15px;
|
||||
margin: 10px 0;
|
||||
font-family: 'Courier New', monospace;
|
||||
border-left: 4px solid #4299e1;
|
||||
}
|
||||
.endpoint a { color: #90cdf4; text-decoration: none; }
|
||||
.endpoint a:hover { text-decoration: underline; }
|
||||
.tools {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
|
||||
gap: 15px;
|
||||
margin-top: 20px;
|
||||
}
|
||||
    .tool-button {
        background: #4299e1;
        color: white;
        border: none;
        padding: 12px 20px;
        border-radius: 6px;
        cursor: pointer;
        font-size: 14px;
    }
    .tool-button:hover { background: #3182ce; }
    .info-table {
        width: 100%;
        border-collapse: collapse;
        margin-top: 15px;
    }
    .info-table th, .info-table td {
        text-align: left;
        padding: 8px 12px;
        border-bottom: 1px solid #e2e8f0;
    }
    .info-table th { background: #f7fafc; font-weight: 600; }
    </style>
    </head>
    <body>
      <div class="container">
        <div class="header">
          <h1>EveAI Staging Environment</h1>
          <div class="subtitle">System Verification & Debug Console</div>
        </div>

        <div class="content">
          <div class="status-grid">
            <div class="status-card">
              <h3><span class="status-indicator healthy"></span>Cluster Status</h3>
              <p><strong>Environment:</strong> Staging</p>
              <p><strong>Ingress:</strong> NGINX</p>
              <p><strong>LoadBalancer:</strong> Scaleway (Automatic)</p>
              <p><strong>CDN:</strong> Bunny.net (Planned)</p>
            </div>

            <div class="status-card">
              <h3><span class="status-indicator healthy"></span>Network</h3>
              <p><strong>Host:</strong> <span id="hostname">Loading...</span></p>
              <p><strong>IP:</strong> <span id="clientip">Loading...</span></p>
              <p><strong>User-Agent:</strong> <span id="useragent">Loading...</span></p>
            </div>

            <div class="status-card">
              <h3><span class="status-indicator info"></span>Container Info</h3>
              <table class="info-table">
                <tr><th>Pod Name</th><td id="podname">verify-service</td></tr>
                <tr><th>Namespace</th><td>eveai-staging</td></tr>
                <tr><th>Container</th><td>nginx:alpine</td></tr>
                <tr><th>Path</th><td>/verify</td></tr>
              </table>
            </div>

            <div class="status-card">
              <h3><span class="status-indicator warning"></span>Planned Services</h3>
              <p><strong>/admin</strong> - Admin interface (Not deployed)</p>
              <p><strong>/api</strong> - Backend API (Not deployed)</p>
              <p><strong>/client</strong> - Frontend app (Not deployed)</p>
              <p><strong>/verify</strong> - This debug service ✓</p>
            </div>
          </div>

          <div class="debug-section">
            <h3>Debug & Health Endpoints</h3>

            <div class="endpoint">
              <strong>Health Check:</strong>
              <a href="/verify/health">/verify/health</a> - Basic health status
            </div>

            <div class="endpoint">
              <strong>System Info:</strong>
              <a href="/verify/info">/verify/info</a> - Detailed system information
            </div>

            <div class="endpoint">
              <strong>Headers:</strong>
              <a href="/verify/headers">/verify/headers</a> - Request headers analysis
            </div>

            <div class="endpoint">
              <strong>Network Test:</strong>
              <a href="/verify/network">/verify/network</a> - Network connectivity tests
            </div>

            <div class="tools">
              <button class="tool-button" onclick="testHealth()">Test Health</button>
              <button class="tool-button" onclick="testConnectivity()">Test APIs</button>
              <button class="tool-button" onclick="showHeaders()">Show Headers</button>
              <button class="tool-button" onclick="downloadLogs()">Get Logs</button>
            </div>
          </div>
        </div>
      </div>

      <script>
        // Populate dynamic content
        document.getElementById('hostname').textContent = window.location.hostname;
        document.getElementById('clientip').textContent = 'Detected via headers';
        document.getElementById('useragent').textContent = navigator.userAgent.substring(0, 50) + '...';

        // Debug functions
        function testHealth() {
          fetch('/verify/health')
            .then(response => response.text())
            .then(data => alert('Health check: ' + data))
            .catch(err => alert('Health check failed: ' + err));
        }

        function testConnectivity() {
          alert('API connectivity tests would run here when APIs are deployed');
        }

        function showHeaders() {
          window.open('/verify/headers', '_blank');
        }

        function downloadLogs() {
          alert('Log download feature - would fetch container logs');
        }
      </script>
    </body>
    </html>
  health.html: |
    <!DOCTYPE html>
    <html>
    <head><title>Health Check</title></head>
    <body>
      <h1>Health Status: OK</h1>
      <p>Timestamp: <script>document.write(new Date().toISOString())</script></p>
      <p>Service: EveAI Staging Verification</p>
      <p>Status: All systems operational</p>
    </body>
    </html>

  info.html: |
    <!DOCTYPE html>
    <html>
    <head><title>System Information</title></head>
    <body>
      <h1>System Information</h1>
      <h2>Environment</h2>
      <ul>
        <li>Namespace: eveai-staging</li>
        <li>Service: verify-service</li>
        <li>Path: /verify</li>
        <li>Container: nginx:alpine</li>
      </ul>
      <h2>Network</h2>
      <ul>
        <li>Ingress Controller: NGINX</li>
        <li>LoadBalancer: Scaleway Automatic</li>
        <li>External IP: Via LoadBalancer</li>
      </ul>
    </body>
    </html>

  headers.html: |
    <!DOCTYPE html>
    <html>
    <head><title>Request Headers</title></head>
    <body>
      <h1>Request Headers Analysis</h1>
      <p>This page would show detailed request headers in a production implementation.</p>
      <p>Useful for debugging CDN, LoadBalancer, and ingress header forwarding.</p>
      <h2>Expected Headers</h2>
      <ul>
        <li>Host: evie-staging.askeveai.com</li>
        <li>X-Forwarded-For: (Client IP)</li>
        <li>X-Forwarded-Proto: http/https</li>
        <li>User-Agent: (Browser/Tool)</li>
      </ul>
    </body>
    </html>

  network.html: |
    <!DOCTYPE html>
    <html>
    <head><title>Network Tests</title></head>
    <body>
      <h1>Network Connectivity Tests</h1>
      <p>This page would run network connectivity tests:</p>
      <h2>Internal Tests</h2>
      <ul>
        <li>DNS Resolution</li>
        <li>Service Discovery</li>
        <li>Database Connectivity (when deployed)</li>
      </ul>
      <h2>External Tests</h2>
      <ul>
        <li>Internet Connectivity</li>
        <li>External API Reachability</li>
        <li>CDN Performance</li>
      </ul>
    </body>
    </html>
---
# Custom nginx configuration
apiVersion: v1
kind: ConfigMap
metadata:
  name: verify-nginx-config
  namespace: eveai-staging
data:
  default.conf: |
    server {
        listen 80;
        server_name _;
        root /usr/share/nginx/html;
        index index.html;

        # ACME Challenge support for SSL certificate verification
        location /.well-known/acme-challenge/ {
            access_log off;
            return 200 "acme-challenge-response";
            add_header Content-Type text/plain;
        }

        # Health endpoint for ingress controller (root level)
        location /healthz {
            access_log off;
            return 200 "healthy\n";
            add_header Content-Type text/plain;
        }

        # Health endpoint
        location /health {
            try_files /health.html =404;
        }

        # Info endpoint
        location /info {
            try_files /info.html =404;
        }

        # Headers analysis
        location /headers {
            try_files /headers.html =404;
        }

        # Network tests
        location /network {
            try_files /network.html =404;
        }

        # Main interface - serve index.html for all other requests
        location / {
            try_files $uri $uri/ /index.html;
        }
    }
---
# Verification service deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: verify-service
  namespace: eveai-staging
  labels:
    app: verify-service
    component: verification
    environment: staging
spec:
  replicas: 1
  selector:
    matchLabels:
      app: verify-service
  template:
    metadata:
      labels:
        app: verify-service
        component: verification
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
          ports:
            - containerPort: 80
          volumeMounts:
            - name: html-content
              mountPath: /usr/share/nginx/html
            - name: nginx-config
              mountPath: /etc/nginx/conf.d
          resources:
            requests:
              memory: "32Mi"
              cpu: "50m"
            limits:
              memory: "64Mi"
              cpu: "100m"
          livenessProbe:
            httpGet:
              path: /verify/health
              port: 80
            initialDelaySeconds: 10
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /verify/health
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 5
      volumes:
        - name: html-content
          configMap:
            name: verify-content
        - name: nginx-config
          configMap:
            name: verify-nginx-config
---
# Service for the verification app
apiVersion: v1
kind: Service
metadata:
  name: verify-service
  namespace: eveai-staging
  labels:
    app: verify-service
spec:
  selector:
    app: verify-service
  ports:
    - port: 80
      targetPort: 80
      name: http
  type: ClusterIP
---
# Ingress rules with path-based routing
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: eveai-staging-ingress
  namespace: eveai-staging
  labels:
    app: eveai
    environment: staging
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    nginx.ingress.kubernetes.io/proxy-body-size: "10m"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "300"
spec:
  ingressClassName: nginx
  rules:
    - host: evie-staging.askeveai.com
      http:
        paths:
          # Verification service paths
          - path: /verify
            pathType: Prefix
            backend:
              service:
                name: verify-service
                port:
                  number: 80

          # Future services (commented out for now)
          # Admin service (not deployed yet)
          # - path: /admin
          #   pathType: Prefix
          #   backend:
          #     service:
          #       name: admin-service
          #       port:
          #         number: 80

          # API service (not deployed yet)
          # - path: /api
          #   pathType: Prefix
          #   backend:
          #     service:
          #       name: api-service
          #       port:
          #         number: 8000

          # Client/Frontend service (not deployed yet)
          # - path: /client
          #   pathType: Prefix
          #   backend:
          #     service:
          #       name: client-service
          #       port:
          #         number: 3000

          # Default: root path to verification service
          - path: /
            pathType: Prefix
            backend:
              service:
                name: verify-service
                port:
                  number: 80
scaleway/push_to_scaleway.sh (new executable file, 277 lines)
@@ -0,0 +1,277 @@
#!/bin/bash

# Exit on any error
set -e

# Function to display usage information
usage() {
    echo "Usage: $0 <version> <environment> [options]"
    echo " version : Version to push (e.g., v1.2.3, v1.2.3-alpha)"
    echo " environment : Target environment (staging|production)"
    echo ""
    echo "Options:"
    echo " --services <service1,service2,...> : Specific services to push (default: all EveAI services)"
    echo " --dry-run : Show what would be done without executing"
    echo ""
    echo "Examples:"
    echo " $0 v1.2.3-alpha staging"
    echo " $0 v2.0.0 production --services eveai_api,eveai_workers"
    echo " $0 v1.0.0-beta staging --dry-run"
}

# Check if required arguments are provided
if [ $# -lt 2 ]; then
    echo "❌ Error: Version and environment are required"
    usage
    exit 1
fi

VERSION=$1
ENVIRONMENT=$2
shift 2

# Default values
SERVICES=""
DRY_RUN=false

# Parse options
while [[ $# -gt 0 ]]; do
    case $1 in
        --services)
            SERVICES="$2"
            shift 2
            ;;
        --dry-run)
            DRY_RUN=true
            shift
            ;;
        -*)
            echo "❌ Unknown option: $1"
            usage
            exit 1
            ;;
        *)
            echo "❌ Unexpected argument: $1"
            usage
            exit 1
            ;;
    esac
done

# Validate environment
if [[ "$ENVIRONMENT" != "staging" && "$ENVIRONMENT" != "production" ]]; then
    echo "❌ Error: Environment must be 'staging' or 'production'"
    usage
    exit 1
fi

# Validate version format (flexible semantic versioning)
if [[ ! "$VERSION" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9\-]+)?$ ]]; then
    echo "❌ Error: Invalid version format. Expected format: v1.2.3 or v1.2.3-alpha"
    echo " Examples: v1.0.0, v2.1.3-beta, v1.0.0-rc1"
    exit 1
fi

# Ensure version starts with 'v'
if [[ ! "$VERSION" =~ ^v ]]; then
    VERSION="v$VERSION"
fi

# Get script directory to find config files
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CONFIG_FILE="$SCRIPT_DIR/configs/$ENVIRONMENT.conf"

# Check if config file exists
if [[ ! -f "$CONFIG_FILE" ]]; then
    echo "❌ Error: Config file not found: $CONFIG_FILE"
    echo " Please create the config file with Scaleway credentials"
    exit 1
fi

# Load configuration
echo "📋 Loading configuration from: $CONFIG_FILE"
source "$CONFIG_FILE"

# Validate required config variables
if [[ -z "$SCALEWAY_REGISTRY" || -z "$SCALEWAY_API_KEY" ]]; then
    echo "❌ Error: Missing required configuration in $CONFIG_FILE"
    echo " Required: SCALEWAY_REGISTRY, SCALEWAY_API_KEY"
    exit 1
fi

# Local registry configuration
LOCAL_REGISTRY="registry.ask-eve-ai-local.com"
ACCOUNT="josakola"

# Check if podman is available
if ! command -v podman &> /dev/null; then
    echo "❌ Error: podman not found"
    exit 1
fi

# Check if yq is available
if ! command -v yq &> /dev/null; then
    echo "❌ Error: yq not found (required for parsing compose file)"
    exit 1
fi

# Check if compose file exists
COMPOSE_FILE="../docker/compose_dev.yaml"
if [[ ! -f "$COMPOSE_FILE" ]]; then
    echo "❌ Error: Compose file '$COMPOSE_FILE' not found"
    exit 1
fi

echo "🚀 EveAI Scaleway Push Script"
echo "📦 Version: $VERSION"
echo "🌍 Environment: $ENVIRONMENT"
echo "🏪 Local Registry: $LOCAL_REGISTRY"
echo "☁️ Scaleway Registry: $SCALEWAY_REGISTRY"
echo "👤 Account: $ACCOUNT"

# Get services to process
if [[ -n "$SERVICES" ]]; then
    # Convert comma-separated list to array
    IFS=',' read -ra SERVICE_ARRAY <<< "$SERVICES"
else
    # Get all EveAI services (excluding nginx as per requirements)
    SERVICE_ARRAY=()
    while IFS= read -r line; do
        SERVICE_ARRAY+=("$line")
    done < <(yq e '.services | keys | .[]' "$COMPOSE_FILE" | grep -E '^eveai_')
fi

echo "🔍 Services to process: ${SERVICE_ARRAY[*]}"

# Function to check if image exists locally
check_local_image_exists() {
    local image_name="$1"
    if podman image exists "$image_name" 2>/dev/null; then
        return 0
    else
        return 1
    fi
}

# Function to authenticate with Scaleway registry
authenticate_scaleway() {
    echo "🔐 Authenticating with Scaleway registry..."

    if [[ "$DRY_RUN" == true ]]; then
        echo " 🔍 [DRY RUN] Would authenticate with Scaleway registry"
        return 0
    fi

    # Extract registry hostname from full registry URL
    REGISTRY_HOST=$(echo "$SCALEWAY_REGISTRY" | cut -d'/' -f1)

    # Login to Scaleway registry using API key
    if ! echo "$SCALEWAY_API_KEY" | podman login --username nologin --password-stdin "$REGISTRY_HOST"; then
        echo " ❌ Failed to authenticate with Scaleway registry"
        echo " 💡 Check your API key in $CONFIG_FILE"
        return 1
    fi

    echo " ✅ Successfully authenticated with Scaleway registry"
    return 0
}

# Authenticate with Scaleway
if ! authenticate_scaleway; then
    exit 1
fi

# Process each service
PROCESSED_SERVICES=()
FAILED_SERVICES=()

for SERVICE in "${SERVICE_ARRAY[@]}"; do
    echo ""
    echo "🔄 Processing service: $SERVICE"

    # Check if service exists in compose file
    if ! yq e ".services.$SERVICE" "$COMPOSE_FILE" | grep -q "image:"; then
        echo "⚠️ Warning: Service '$SERVICE' not found in $COMPOSE_FILE, skipping"
        continue
    fi

    # Construct image names
    LOCAL_VERSION_IMAGE="$LOCAL_REGISTRY/$ACCOUNT/$SERVICE:$VERSION"
    SCALEWAY_VERSION_IMAGE="$SCALEWAY_REGISTRY/$ACCOUNT/$SERVICE:$VERSION"

    echo " 📥 Source: $LOCAL_VERSION_IMAGE"
    echo " 📤 Target: $SCALEWAY_VERSION_IMAGE"

    if [[ "$DRY_RUN" == true ]]; then
        echo " 🔍 [DRY RUN] Would push $LOCAL_VERSION_IMAGE to $SCALEWAY_VERSION_IMAGE"
        PROCESSED_SERVICES+=("$SERVICE")
        continue
    fi

    # Check if local version image exists
    if ! check_local_image_exists "$LOCAL_VERSION_IMAGE"; then
        echo " ❌ Local version image not found: $LOCAL_VERSION_IMAGE"
        echo " 💡 Run tag_registry_version.sh first to create version tags"
        FAILED_SERVICES+=("$SERVICE")
        continue
    fi

    # Pull local version image to ensure we have it
    echo " 📥 Pulling local version image..."
    if ! podman pull "$LOCAL_VERSION_IMAGE"; then
        echo " ❌ Failed to pull $LOCAL_VERSION_IMAGE"
        FAILED_SERVICES+=("$SERVICE")
        continue
    fi

    # Tag for Scaleway registry (direct push with same version tag)
    echo " 🏷️ Tagging for Scaleway registry..."
    if ! podman tag "$LOCAL_VERSION_IMAGE" "$SCALEWAY_VERSION_IMAGE"; then
        echo " ❌ Failed to tag $LOCAL_VERSION_IMAGE as $SCALEWAY_VERSION_IMAGE"
        FAILED_SERVICES+=("$SERVICE")
        continue
    fi

    # Push to Scaleway registry
    echo " 📤 Pushing to Scaleway registry..."
    if ! podman push "$SCALEWAY_VERSION_IMAGE"; then
        echo " ❌ Failed to push $SCALEWAY_VERSION_IMAGE"
        FAILED_SERVICES+=("$SERVICE")
        continue
    fi

    # Clean up local Scaleway tag
    echo " 🧹 Cleaning up local Scaleway tag..."
    podman rmi "$SCALEWAY_VERSION_IMAGE" 2>/dev/null || true

    echo " ✅ Successfully pushed $SERVICE version $VERSION to Scaleway"
    PROCESSED_SERVICES+=("$SERVICE")
done

# Summary
echo ""
echo "📊 Summary:"
echo "✅ Successfully processed: ${#PROCESSED_SERVICES[@]} services"
if [[ ${#PROCESSED_SERVICES[@]} -gt 0 ]]; then
    printf " - %s\n" "${PROCESSED_SERVICES[@]}"
fi

if [[ ${#FAILED_SERVICES[@]} -gt 0 ]]; then
    echo "❌ Failed: ${#FAILED_SERVICES[@]} services"
    printf " - %s\n" "${FAILED_SERVICES[@]}"
fi

if [[ "$DRY_RUN" == true ]]; then
    echo "🔍 This was a dry run - no actual changes were made"
fi

echo ""
if [[ ${#FAILED_SERVICES[@]} -eq 0 ]]; then
    echo "🎉 All services successfully pushed to Scaleway $ENVIRONMENT!"
    echo "☁️ Images are available in Scaleway registry: $SCALEWAY_REGISTRY/$ACCOUNT/[service]:$VERSION"
else
    echo "⚠️ Some services failed to process. Check the errors above."
    exit 1
fi

echo "🕐 Finished at $(date +"%d/%m/%Y %H:%M:%S")"
scaleway/scaleway_env_switch.sh (new executable file, 203 lines)
@@ -0,0 +1,203 @@
#!/usr/bin/env zsh

# Function to display usage information
usage() {
    echo "Usage: source $0 <environment>"
    echo " environment: The Scaleway environment to use (staging|production)"
    echo ""
    echo "Examples:"
    echo " source $0 staging"
    echo " source $0 production"
}

# Check if the script is sourced - improved for both bash and zsh
is_sourced() {
    if [[ -n "$ZSH_VERSION" ]]; then
        # In zsh, check if we're in a sourced context
        [[ "$ZSH_EVAL_CONTEXT" =~ "(:file|:cmdsubst)" ]] || [[ "$0" != "$ZSH_ARGZERO" ]]
    else
        # In bash, compare BASH_SOURCE with $0
        [[ "${BASH_SOURCE[0]}" != "${0}" ]]
    fi
}

if ! is_sourced; then
    echo "❌ Error: This script must be sourced, not executed directly."
    echo "Please run: source $0 <environment>"
    if [[ -n "$ZSH_VERSION" ]]; then
        return 1 2>/dev/null || exit 1
    else
        exit 1
    fi
fi

# Check if an environment is provided
if [ $# -eq 0 ]; then
    usage
    return 1
fi

ENVIRONMENT=$1

# Validate environment
if [[ "$ENVIRONMENT" != "staging" && "$ENVIRONMENT" != "production" ]]; then
    echo "❌ Invalid environment: $ENVIRONMENT"
    usage
    return 1
fi

# Get script directory to find config files
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]:-$0}")" && pwd)"
CONFIG_FILE="$SCRIPT_DIR/configs/$ENVIRONMENT.conf"

# Check if config file exists
if [[ ! -f "$CONFIG_FILE" ]]; then
    echo "❌ Error: Config file not found: $CONFIG_FILE"
    echo " Please create the config file with Scaleway credentials"
    return 1
fi

# Load configuration
echo "📋 Loading Scaleway $ENVIRONMENT configuration..."
source "$CONFIG_FILE"

# Validate required config variables
if [[ -z "$SCALEWAY_REGISTRY" || -z "$SCALEWAY_API_KEY" ]]; then
    echo "❌ Error: Missing required configuration in $CONFIG_FILE"
    echo " Required: SCALEWAY_REGISTRY, SCALEWAY_API_KEY"
    return 1
fi

# Check if kubectl is available for K8s context switching
KUBECTL_AVAILABLE=false
if command -v kubectl &> /dev/null; then
    KUBECTL_AVAILABLE=true
else
    echo "⚠️ Warning: kubectl not found - K8s context switching will be skipped"
fi

echo "☁️ Scaleway Environment Switch"
echo "🌍 Environment: $ENVIRONMENT"
echo "🏪 Registry: $SCALEWAY_REGISTRY"
echo "🌐 Region: ${SCALEWAY_REGION:-fr-par}"

# Set environment variables
export SCALEWAY_ENVIRONMENT=$ENVIRONMENT
export SCALEWAY_REGISTRY=$SCALEWAY_REGISTRY
export SCALEWAY_API_KEY=$SCALEWAY_API_KEY
export SCALEWAY_REGION=${SCALEWAY_REGION:-fr-par}
export SCALEWAY_PROJECT_ID=${SCALEWAY_PROJECT_ID:-}

# Handle kubectl context switching if available
if [[ "$KUBECTL_AVAILABLE" == true && -n "$K8S_CONTEXT" ]]; then
    echo "🔄 Switching kubectl context..."

    # Check if the context exists
    if kubectl config get-contexts "$K8S_CONTEXT" &>/dev/null; then
        if kubectl config use-context "$K8S_CONTEXT" &>/dev/null; then
            echo "✅ Switched to kubectl context: $K8S_CONTEXT"
            export KUBECTL_CONTEXT=$K8S_CONTEXT
        else
            echo "⚠️ Warning: Failed to switch to kubectl context: $K8S_CONTEXT"
        fi
    else
        echo "⚠️ Warning: kubectl context '$K8S_CONTEXT' does not exist"
        echo " 💡 You may need to configure this context manually"
    fi
elif [[ -n "$K8S_CONTEXT" ]]; then
    echo "⚠️ kubectl not available - context switching skipped"
    export KUBECTL_CONTEXT=$K8S_CONTEXT
fi

# Define helper functions for Scaleway operations
scaleway_login() {
    echo "🔐 Logging into Scaleway registry..."

    # Extract registry hostname from full registry URL
    REGISTRY_HOST=$(echo "$SCALEWAY_REGISTRY" | cut -d'/' -f1)

    # Login to Scaleway registry using API key
    if echo "$SCALEWAY_API_KEY" | podman login --username nologin --password-stdin "$REGISTRY_HOST"; then
        echo "✅ Successfully logged into Scaleway registry"
    else
        echo "❌ Failed to login to Scaleway registry"
        return 1
    fi
}

scaleway_logout() {
    echo "🔓 Logging out of Scaleway registry..."

    # Extract registry hostname from full registry URL
    REGISTRY_HOST=$(echo "$SCALEWAY_REGISTRY" | cut -d'/' -f1)

    if podman logout "$REGISTRY_HOST" 2>/dev/null; then
        echo "✅ Successfully logged out of Scaleway registry"
    else
        echo "⚠️ Warning: Could not logout of Scaleway registry (may not have been logged in)"
    fi
}

scaleway_info() {
    echo "📋 Current Scaleway Configuration:"
    echo " 🌍 Environment: $SCALEWAY_ENVIRONMENT"
    echo " 🏪 Registry: $SCALEWAY_REGISTRY"
    echo " 🌐 Region: $SCALEWAY_REGION"
    if [[ -n "$SCALEWAY_PROJECT_ID" ]]; then
        echo " 📁 Project ID: $SCALEWAY_PROJECT_ID"
    fi
    if [[ -n "$KUBECTL_CONTEXT" ]]; then
        echo " ⚙️ K8s Context: $KUBECTL_CONTEXT"
    fi
    if [[ -n "$K8S_CLUSTER_NAME" ]]; then
        echo " 🏗️ Cluster: $K8S_CLUSTER_NAME"
    fi
}

scaleway_push() {
    local version="$1"
    if [[ -z "$version" ]]; then
        echo "❌ Error: Version is required"
        echo "Usage: scaleway_push <version> [services]"
        return 1
    fi

    shift
    local services="$*"

    echo "🚀 Pushing version $version to Scaleway $SCALEWAY_ENVIRONMENT..."

    if [[ -n "$services" ]]; then
        "$SCRIPT_DIR/push_to_scaleway.sh" "$version" "$SCALEWAY_ENVIRONMENT" --services "$services"
    else
        "$SCRIPT_DIR/push_to_scaleway.sh" "$version" "$SCALEWAY_ENVIRONMENT"
    fi
}

# Export functions - handle both bash and zsh
if [[ -n "$ZSH_VERSION" ]]; then
    # In zsh, functions are automatically available in subshells
    # But we can make them available globally with typeset
    typeset -f scaleway_login scaleway_logout scaleway_info scaleway_push > /dev/null
else
    # Bash style export
    export -f scaleway_login scaleway_logout scaleway_info scaleway_push
fi

echo ""
echo "✅ Scaleway environment switched to $ENVIRONMENT"
echo "☁️ Registry: $SCALEWAY_REGISTRY"
if [[ -n "$KUBECTL_CONTEXT" ]]; then
    echo "⚙️ kubectl context: $KUBECTL_CONTEXT"
fi
echo ""
echo "Available commands:"
echo " scaleway_login - Login to Scaleway registry"
echo " scaleway_logout - Logout from Scaleway registry"
echo " scaleway_info - Show current configuration"
echo " scaleway_push - Push version to current environment"
echo ""
echo "💡 Example usage:"
echo " scaleway_login"
echo " scaleway_push v1.2.3-alpha"
echo " scaleway_push v2.0.0 eveai_api,eveai_workers"
scaleway/scaleway_staging_setup.md (new file, 243 lines)
@@ -0,0 +1,243 @@
### Revised Analysis and Step-by-Step Plan - Final Version

Thanks for the clear answers! This gives me all the information I need to draw up a definitive step-by-step plan.

### Revised Situation Analysis

#### **Persistent Storage Requirements**
You're right - for the **internal Prometheus** we do indeed need persistent storage for:
- **Prometheus data**: Metrics history (7-14 days retention)
- **Pushgateway data**: Temporary metrics buffer
- **Application logs**: Via Scaleway Logs (managed)

#### **Logging Strategy - Clear**
- **Application logs**: Scaleway Logs (managed, 7-14 days)
- **Business event logs**: PostgreSQL (under your control, invoicing)
- **Audit logs**: Not explicit, but DB tracking of changes already exists

#### **Infrastructure Status**
- **Staging cluster**: Operational but empty
- **DNS access**: Available via cPanel
- **Secrets**: Fully configured in Scaleway Secret Manager

### Revised Step-by-Step Plan

#### **Phase 1: Infrastructure & Storage Setup (Week 1)**

1. **Persistent Storage Configuration**
   ```yaml
   # Scaleway Block Storage volumes
   - prometheus-data: 20GB (metrics retention)
   - pushgateway-data: 5GB (temporary buffer)
   - application-logs: 10GB (7-day buffer for Scaleway Logs)
   ```

2. **DNS & SSL Setup**
   - Configure `evie-staging.askeveai.com` in cPanel
   - Point it to the K8s LoadBalancer IP
   - Set up Let's Encrypt SSL certificates (see the issuer sketch below)
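
   A minimal cert-manager ClusterIssuer sketch for the Let's Encrypt part, assuming cert-manager is installed in the cluster; the e-mail address is a placeholder, and the issuer name matches the `letsencrypt-prod` annotation used in step 9:

   ```yaml
   apiVersion: cert-manager.io/v1
   kind: ClusterIssuer
   metadata:
     name: letsencrypt-prod
   spec:
     acme:
       server: https://acme-v02.api.letsencrypt.org/directory
       email: ops@askeveai.com              # placeholder contact address
       privateKeySecretRef:
         name: letsencrypt-prod-account-key
       solvers:
         - http01:
             ingress:
               class: nginx                 # solved via the NGINX ingress controller
   ```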

3. **Scaleway Logs Setup**
   ```yaml
   # Fluent Bit DaemonSet configuration
   # Ships logs directly to Scaleway Logs
   # 7-day retention policy
   ```

4. **External Secrets Operator**
   - Install ESO in the K8s cluster
   - Configure the Scaleway Secret Manager integration
   - Test the secrets mapping (see the example below)
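
   A sketch of what one ESO mapping could look like, assuming a `ClusterSecretStore` named `scaleway-secret-manager` has been configured for Scaleway Secret Manager separately; the secret names are illustrative and should match what is stored in Secret Manager:

   ```yaml
   apiVersion: external-secrets.io/v1beta1
   kind: ExternalSecret
   metadata:
     name: eveai-app-secrets
     namespace: eveai-staging
   spec:
     refreshInterval: 1h
     secretStoreRef:
       kind: ClusterSecretStore
       name: scaleway-secret-manager      # assumed store name, configured separately
     target:
       name: eveai-app-secrets            # K8s Secret consumed via envFrom in step 8
     dataFrom:
       - extract:
           key: eveai-app-secrets         # name of the secret in Scaleway Secret Manager
   ```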

#### **Phase 2: Monitoring Stack Deployment (Week 1-2)**

5. **Prometheus Stack with Persistent Storage**
   ```yaml
   # Prometheus StatefulSet (volumeClaimTemplates require a StatefulSet)
   spec:
     volumeClaimTemplates:
       - metadata:
           name: prometheus-data
         spec:
           accessModes: ["ReadWriteOnce"]
           resources:
             requests:
               storage: 20Gi

   # Pushgateway Deployment
   spec:
     volumes:
       - name: pushgateway-data
         persistentVolumeClaim:
           claimName: pushgateway-pvc
   ```

6. **Business Metrics Integration**
   - Keep the current `business_event.py` logic
   - Pushgateway remains available on K8s
   - Configure Prometheus scraping of the pushgateway (scrape-config sketch below)
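
   A minimal Prometheus scrape-config sketch for the pushgateway, assuming it is exposed as a `pushgateway` Service on port 9091 in the `eveai-staging` namespace (the service DNS name is an assumption):

   ```yaml
   scrape_configs:
     - job_name: pushgateway
       honor_labels: true    # keep the job/instance labels pushed by the EveAI services
       static_configs:
         - targets: ['pushgateway.eveai-staging.svc.cluster.local:9091']
   ```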

7. **Scaleway Cockpit Remote Write**
   ```yaml
   # Prometheus configuration
   remote_write:
     - url: "https://metrics.cockpit.fr-par.scw.cloud/api/v1/push"
       headers:
         X-Token: "{{ scaleway_metrics_token }}"
   ```

#### **Phase 3: Application Services Deployment (Week 2)**

8. **Core Services with Secrets Integration**
   ```yaml
   # Deployment template for all 8 services
   spec:
     template:
       spec:
         containers:
           - name: eveai-service
             envFrom:
               - secretRef:
                   name: eveai-app-secrets
               - secretRef:
                   name: eveai-postgresql-secrets
               # etc.
   ```

9. **Ingress Controller & SSL**
   ```yaml
   # Nginx Ingress with SSL
   apiVersion: networking.k8s.io/v1
   kind: Ingress
   metadata:
     name: eveai-staging-ingress
     annotations:
       cert-manager.io/cluster-issuer: "letsencrypt-prod"
   spec:
     tls:
       - hosts:
           - evie-staging.askeveai.com
         secretName: eveai-staging-tls
   ```

10. **Service Dependencies & Health Checks**
    - Init containers for database migrations (see the sketch below)
    - Readiness/liveness probes for all services
    - Service discovery configuration
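
    A sketch of the init-container and probe pattern for one service, assuming the API image runs Flask-Migrate (`flask db upgrade`) and serves a `/health` route on port 8000; image name, command, and paths are placeholders to adapt per service:

    ```yaml
    spec:
      template:
        spec:
          initContainers:
            - name: db-migrate
              image: rg.fr-par.scw.cloud/josakola/eveai_api:v1.0.0   # placeholder image/tag
              command: ["flask", "db", "upgrade"]                    # assumed migration command
              envFrom:
                - secretRef:
                    name: eveai-postgresql-secrets
          containers:
            - name: eveai-api
              readinessProbe:
                httpGet:
                  path: /health        # assumed health route
                  port: 8000
                initialDelaySeconds: 10
                periodSeconds: 10
              livenessProbe:
                httpGet:
                  path: /health
                  port: 8000
                initialDelaySeconds: 30
                periodSeconds: 15
    ```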

#### **Phase 4: Logging & Observability (Week 2-3)**

11. **Scaleway Logs Integration**
    ```yaml
    # Fluent Bit DaemonSet
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: fluent-bit
    spec:
      template:
        spec:
          containers:
            - name: fluent-bit
              image: fluent/fluent-bit:latest
              env:
                - name: SCALEWAY_LOGS_TOKEN
                  valueFrom:
                    secretKeyRef:
                      name: scaleway-logs-secret
                      key: token
    ```

12. **Log Routing Configuration**
    - Application logs → Scaleway Logs (7-day retention)
    - Business events → PostgreSQL (under your control)
    - System logs → Scaleway Logs
    - Error logs → Scaleway Logs + alerting (routing sketch below)
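
    A rough Fluent Bit routing sketch for the DaemonSet above: tail container logs, re-tag error lines, and ship everything to Scaleway Logs. The push host and the basic-auth use of the token are placeholders; the exact endpoint and authentication settings should be taken from the Cockpit "push logs" instructions:

    ```yaml
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: fluent-bit-config
      namespace: eveai-staging
    data:
      fluent-bit.conf: |
        [INPUT]
            Name    tail
            Path    /var/log/containers/*.log
            Tag     kube.*

        [FILTER]
            Name    rewrite_tag
            Match   kube.*
            Rule    $log  (ERROR|CRITICAL)  error.$TAG  false

        [OUTPUT]
            Name         loki
            Match        *
            Host         logs.cockpit.fr-par.scw.cloud   # placeholder, see Cockpit
            Port         443
            Tls          On
            Http_User    api_key                         # assumed auth scheme
            Http_Passwd  ${SCALEWAY_LOGS_TOKEN}
            Labels       job=fluent-bit, env=staging
    ```

    The `error.*` stream can then drive alerting separately, while business events keep flowing to PostgreSQL from the application itself.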

#### **Phase 5: Testing & Go-Live (Week 3-4)**

13. **Deployment Automation**
    - Update `push_to_scaleway.sh` for K8s deployment
    - Rolling update configuration (see the strategy sketch below)
    - Rollback procedures
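
    A minimal rolling-update strategy sketch for the service Deployments; the surge/unavailability numbers are assumptions to tune per service:

    ```yaml
    spec:
      replicas: 2
      strategy:
        type: RollingUpdate
        rollingUpdate:
          maxSurge: 1           # bring up one extra pod during the rollout
          maxUnavailable: 0     # keep the old pod serving until the new one is ready
    ```

    For rollbacks, `kubectl rollout undo deployment/<name> -n eveai-staging` reverts to the previous ReplicaSet revision.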

14. **Monitoring Dashboards**
    - Scaleway Cockpit dashboards for infrastructure
    - Custom business metrics visualization
    - Alerting configuration

15. **Load Testing & Performance**
    - Staging environment load testing
    - Resource limits fine-tuning
    - Auto-scaling configuration (HPA, sketch below)
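
    A starting-point HPA sketch; the target Deployment name and thresholds are assumptions for staging:

    ```yaml
    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    metadata:
      name: eveai-api-hpa
      namespace: eveai-staging
    spec:
      scaleTargetRef:
        apiVersion: apps/v1
        kind: Deployment
        name: eveai-api          # assumed Deployment name
      minReplicas: 1
      maxReplicas: 3
      metrics:
        - type: Resource
          resource:
            name: cpu
            target:
              type: Utilization
              averageUtilization: 70
    ```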

### Specific Storage Configuration

#### **Persistent Volumes Planning**

```yaml
# Prometheus Storage
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: prometheus-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
  storageClassName: scw-bssd  # Scaleway Block Storage

---
# Pushgateway Storage
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pushgateway-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: scw-bssd
```

#### **Monitoring Stack Resource Allocation**

For the staging environment:
- **Prometheus**: 2GB RAM, 1 CPU, 20GB storage (14 days of metrics)
- **Pushgateway**: 512MB RAM, 0.5 CPU, 5GB storage
- **Fluent Bit**: 256MB RAM, 0.2 CPU per node

### Answers to Your Specific Questions

#### **1. Persistent Storage for Prometheus**
Yes, absolutely necessary. Without persistent storage you lose all metrics on pod restarts. We configure Scaleway Block Storage volumes for:
- Prometheus metrics data (20GB, 14-day retention)
- Pushgateway buffer (5GB)

#### **2. Audit Logs Clarification**
By "audit logs" I meant system-level logs (who accessed which resources, API calls, etc.). Your DB tracking of changes is perfect and stays as it is.

#### **3. Scaleway Logs vs Kubernetes Native**
Scaleway Logs is indeed the best choice because:
- Fully managed (no maintenance)
- Automatic retention management
- Integrated with Cockpit
- Cost-effective for staging

#### **4. Business Event Logs**
These stay in PostgreSQL as they are now, because:
- Needed for invoicing (critical data)
- Retention stays under your control
- Structured data for business logic

### Next Concrete Steps

1. **DNS Setup**: Configure `evie-staging.askeveai.com` in cPanel
2. **Storage Classes**: Verify the Scaleway Block Storage classes in K8s
3. **External Secrets**: Install ESO and test the secrets mapping
4. **Monitoring Stack**: Deploy Prometheus + Pushgateway with persistent storage

Would you like to start with step 1 (DNS setup), or do you prefer a different order?