- Changes toward a fully functional k8s cluster: first running version of the cluster; adding services works; further changes to the app are still required.

Josako
2025-08-14 16:58:09 +02:00
parent 7e35549262
commit 066f579294
10 changed files with 1966 additions and 0 deletions

0
docker/podman_env_switch.sh Normal file → Executable file

106
k8s/dev/config-secrets.yaml Normal file

@@ -0,0 +1,106 @@
# ConfigMaps and Secrets for EveAI Dev Environment
# File: config-secrets.yaml
---
# Namespace for dev environment
apiVersion: v1
kind: Namespace
metadata:
name: eveai-dev
labels:
environment: dev
app: eveai
---
# Non-sensitive configuration
apiVersion: v1
kind: ConfigMap
metadata:
name: eveai-config
namespace: eveai-dev
data:
# Database configuration (points to external PostgreSQL)
DB_HOST: "host.docker.internal" # Will resolve to host IP from inside Kind
DB_PORT: "5432"
DB_NAME: "eveai_dev"
DB_USER: "luke"
# Redis configuration (internal to cluster)
REDIS_URL: "redis-service"
REDIS_PORT: "6379"
# MinIO configuration (internal to cluster)
MINIO_ENDPOINT: "minio-service:9000"
MINIO_ACCESS_KEY: "minioadmin"
# Application settings
FLASK_ENV: "development"
FLASK_DEBUG: "true"
# Flower configuration
FLOWER_USER: "Felucia"
# Nginx configuration
NGINX_SERVER_NAME: "localhost minty.ask-eve-ai-local.com" # server_name takes hostnames, not URLs
# CrewAI configuration
CREWAI_STORAGE_DIR: "/app/crewai_storage"
# Monitoring configuration
PUSH_GATEWAY_HOST: "pushgateway-service"
PUSH_GATEWAY_PORT: "9091"
# Email configuration
SW_EMAIL_SENDER: "admin_dev@mail.askeveai.be"
SW_EMAIL_NAME: "Evie Admin (dev)"
SW_PROJECT: "f282f55a-ea52-4538-a979-5bcb890717ab"
---
# Sensitive configuration
apiVersion: v1
kind: Secret
metadata:
name: eveai-secrets
namespace: eveai-dev
type: Opaque
data:
# Database password (base64 encoded)
DB_PASS: U2t5d2Fsa2VyIQ== # "Skywalker!"
# API Keys (base64 encoded)
OPENAI_API_KEY: c2stcHJvai04UjBqV3p3akw3UGVvUHlNaEpUWlQzQmxia0ZKTGI2SGZSR0JIcjljRVZGV0VoVTc=
GROQ_API_KEY: Z3NrX0dIZlRkcFlwbmFTS1pGSklzSlJBV0dkeWIzRlkzNWN2RjZBTHBMVThEYzR0SUZMVWZRNA==
MISTRAL_API_KEY: MGY0WmlRMWtJcGdJS1RIWDhkMGE4R09EMnZBZ1ZxRW4=
ANTHROPIC_API_KEY: c2stYW50LWFwaTAzLWMyVG1remJSZWVHaFhCTzVKeE5INkJKTnlsUkRvbmM5R21aZDBIZRbrvVyeWVrZWMyVHJ2eWVrZWMyVGpOeWVrZWMybYk95Z1k=
LANGCHAIN_API_KEY: bHN2Ml9za180ZmViMWU2MDVlNzA0MGFlYjM1N2M1OTAyNWZiZWEzMl9jNWU4NWVjNDEx
SERPER_API_KEY: ZTRjNTUzODU2ZDBlNmI1YTE3MWVjNWU2YjY5ZDg3NDI4NWI5YmFkZg==
# Application secrets
SECRET_KEY: OTc4NjdjMTQ5MWJlYTVlZTZhOGU4NDM2ZWIxMWJmMmJhNmE2OWZmNTNhYjFiMTdlY2JhNDUwZDBmMmU1NzJlMQ==
SECURITY_PASSWORD_SALT: MjI4NjE0ODU5NDM5MTIzMjY0MDM1NTY1NTY4NzYxNDMzNjA3MjM1
JWT_SECRET_KEY: YnNkTWttUThPYmZNRDUyeUFGZzR0cnJ2amdqTWh1SXFnMmZqRHBEL0pxdmdZMGNjQ2NtbHNFblZGbVI3OVdQaUxLRUEzaThhNXptZWp3TFpLbDR2OVE9PQ==
API_ENCRYPTION_KEY: eGZGNTM2OUlzcmVkU3JscllaUWtNOVpOcmZVQVNZWVM2VENjQVI5VUtqND0=
# MinIO secret
MINIO_SECRET_KEY: bWluaW9hZG1pbg== # "minioadmin"
# Flower password
FLOWER_PASSWORD: SmVsZW5z # "Jelens"
# Email configuration
SW_EMAIL_ACCESS_KEY: U0NXRk1ROTM3MkhONFlHS0YwNFNXMA==
SW_EMAIL_SECRET_KEY: ZWM4NDYwNGMtZTJkNC00YjBkLWExMjAtNDA0MjA2OTNmNDJh
---
# External Service for PostgreSQL (points to host database)
apiVersion: v1
kind: Service
metadata:
name: postgres-external
namespace: eveai-dev
spec:
type: ExternalName
externalName: host.docker.internal
ports:
- port: 5432
targetPort: 5432
protocol: TCP
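
A quick way to sanity-check the Secret values is to round-trip them through base64. A minimal sketch, not part of this commit, assuming the manifests above have been applied:

# Encode a value for the Secret (-n keeps a trailing newline out of the password)
echo -n 'Skywalker!' | base64   # -> U2t5d2Fsa2VyIQ==
# Decode a value straight from the cluster to verify it landed correctly
kubectl get secret eveai-secrets -n eveai-dev -o jsonpath='{.data.DB_PASS}' | base64 -d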

242
k8s/dev/deploy-all-services.sh Executable file

@@ -0,0 +1,242 @@
#!/bin/bash
# Deploy All EveAI Dev Services Script
# File: deploy-all-services.sh
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Helper functions for colored output
print_status() {
echo -e "${BLUE}[INFO]${NC} $1"
}
print_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Check if kubectl is pointing to the right cluster
check_cluster_context() {
print_status "Checking cluster context..."
CURRENT_CONTEXT=$(kubectl config current-context)
if [[ "$CURRENT_CONTEXT" != "kind-eveai-dev-cluster" ]]; then
print_error "Wrong cluster context: $CURRENT_CONTEXT"
print_error "Expected: kind-eveai-dev-cluster"
echo "Switch context with: kubectl config use-context kind-eveai-dev-cluster"
exit 1
fi
print_success "Using correct cluster context: $CURRENT_CONTEXT"
}
# Wait for pods to be ready
wait_for_pods() {
local namespace=$1
local app_label=$2
local timeout=${3:-300}
print_status "Waiting for $app_label pods to be ready..."
if kubectl wait --for=condition=Ready pods -l app=$app_label -n $namespace --timeout=${timeout}s; then
print_success "$app_label pods are ready"
return 0
else
print_error "$app_label pods failed to become ready within ${timeout}s"
return 1
fi
}
# Deploy services in correct order
deploy_infrastructure() {
print_status "Deploying infrastructure services (Redis, MinIO)..."
if kubectl apply -f redis-minio-services.yaml; then
print_success "Infrastructure services deployed"
else
print_error "Failed to deploy infrastructure services"
exit 1
fi
# Wait for infrastructure to be ready
wait_for_pods "eveai-dev" "redis" 180
wait_for_pods "eveai-dev" "minio" 300
}
deploy_application_services() {
print_status "Deploying EveAI application services..."
if kubectl apply -f eveai-services.yaml; then
print_success "Application services deployed"
else
print_error "Failed to deploy application services"
exit 1
fi
# Wait for key services to be ready
wait_for_pods "eveai-dev" "eveai-app" 180
wait_for_pods "eveai-dev" "eveai-api" 180
wait_for_pods "eveai-dev" "eveai-chat-client" 180
}
deploy_nginx_monitoring() {
print_status "Deploying Nginx and monitoring services..."
if kubectl apply -f nginx-monitoring-services.yaml; then
print_success "Nginx and monitoring services deployed"
else
print_error "Failed to deploy Nginx and monitoring services"
exit 1
fi
# Wait for nginx and monitoring to be ready
wait_for_pods "eveai-dev" "nginx" 120
wait_for_pods "eveai-dev" "prometheus" 180
wait_for_pods "eveai-dev" "grafana" 180
}
# Check service status
check_services() {
print_status "Checking service status..."
echo ""
print_status "Pods status:"
kubectl get pods -n eveai-dev
echo ""
print_status "Services status:"
kubectl get services -n eveai-dev
echo ""
print_status "Persistent Volume Claims:"
kubectl get pvc -n eveai-dev
}
# Test service connectivity
test_connectivity() {
print_status "Testing service connectivity..."
# Test endpoints that should respond
endpoints=(
"http://localhost:3080" # Nginx
"http://localhost:3001/healthz/ready" # EveAI App
"http://localhost:3003/healthz/ready" # EveAI API
"http://localhost:3004/healthz/ready" # Chat Client
"http://localhost:3009" # MinIO Console
"http://localhost:3010" # Prometheus
"http://localhost:3012" # Grafana
)
for endpoint in "${endpoints[@]}"; do
print_status "Testing $endpoint..."
if curl -f -s --max-time 10 "$endpoint" > /dev/null; then
print_success "$endpoint is responding"
else
print_warning "$endpoint is not responding (may still be starting up)"
fi
done
}
# Show connection information
show_connection_info() {
echo ""
echo "=================================================="
print_success "EveAI Dev Cluster deployed successfully!"
echo "=================================================="
echo ""
echo "🌐 Service URLs:"
echo " Main Application:"
echo " • Nginx Proxy: http://minty.ask-eve-ai-local.com:3080"
echo " • EveAI App: http://minty.ask-eve-ai-local.com:3001"
echo " • EveAI API: http://minty.ask-eve-ai-local.com:3003"
echo " • Chat Client: http://minty.ask-eve-ai-local.com:3004"
echo ""
echo " Infrastructure:"
echo " • Redis: redis://minty.ask-eve-ai-local.com:3006"
echo " • MinIO S3: http://minty.ask-eve-ai-local.com:3008"
echo " • MinIO Console: http://minty.ask-eve-ai-local.com:3009"
echo ""
echo " Monitoring:"
echo " • Flower (Celery): http://minty.ask-eve-ai-local.com:3007"
echo " • Prometheus: http://minty.ask-eve-ai-local.com:3010"
echo " • Grafana: http://minty.ask-eve-ai-local.com:3012"
echo ""
echo "🔑 Default Credentials:"
echo " • MinIO: minioadmin / minioadmin"
echo " • Grafana: admin / admin"
echo " • Flower: Felucia / Jungles"
echo ""
echo "🛠️ Management Commands:"
echo " • kubectl get all -n eveai-dev"
echo " • kubectl logs -f deployment/eveai-app -n eveai-dev"
echo " • kubectl describe pod <pod-name> -n eveai-dev"
echo ""
echo "🗂️ Data Persistence:"
echo " • Host data path: $HOME/k8s-data/dev/"
echo " • Logs path: $HOME/k8s-data/dev/logs/"
}
# Main execution
main() {
echo "=================================================="
echo "🚀 Deploying EveAI Dev Services to Kind Cluster"
echo "=================================================="
check_cluster_context
# Deploy in stages
deploy_infrastructure
print_status "Infrastructure deployment completed, proceeding with applications..."
sleep 5
deploy_application_services
print_status "Application deployment completed, proceeding with Nginx and monitoring..."
sleep 5
deploy_nginx_monitoring
print_status "All services deployed, running final checks..."
sleep 10
check_services
test_connectivity
show_connection_info
}
# Check for command line options
case "${1:-}" in
"infrastructure")
check_cluster_context
deploy_infrastructure
;;
"apps")
check_cluster_context
deploy_application_services
;;
"monitoring")
check_cluster_context
deploy_nginx_monitoring
;;
"status")
check_cluster_context
check_services
;;
"test")
test_connectivity
;;
*)
main "$@"
;;
esac
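
For reference, the case statement above gives the script staged entry points; a typical session (filenames as in this commit) looks like:

./deploy-all-services.sh                 # full run: infrastructure -> apps -> nginx/monitoring
./deploy-all-services.sh infrastructure  # only Redis + MinIO
./deploy-all-services.sh status          # pods, services and PVC overview
./deploy-all-services.sh test            # curl the health endpoints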

469
k8s/dev/eveai-services.yaml Normal file

@@ -0,0 +1,469 @@
# EveAI Application Services for Dev Environment
# File: eveai-services.yaml
---
# Shared Logs PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: app-logs-pvc
namespace: eveai-dev
spec:
accessModes:
- ReadWriteMany
storageClassName: local-storage
resources:
requests:
storage: 5Gi
selector:
matchLabels:
app: eveai
environment: dev
---
# EveAI App Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: eveai-app
namespace: eveai-dev
labels:
app: eveai-app
environment: dev
spec:
replicas: 1
selector:
matchLabels:
app: eveai-app
template:
metadata:
labels:
app: eveai-app
spec:
containers:
- name: eveai-app
image: registry.ask-eve-ai-local.com/josakola/eveai_app:latest
ports:
- containerPort: 5001
- containerPort: 8000
env:
- name: COMPONENT_NAME
value: "eveai_app"
envFrom:
- configMapRef:
name: eveai-config
- secretRef:
name: eveai-secrets
volumeMounts:
- name: app-logs
mountPath: /app/logs
livenessProbe:
httpGet:
path: /healthz/ready
port: 5001
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
readinessProbe:
httpGet:
path: /healthz/ready
port: 5001
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 10
failureThreshold: 3
resources:
requests:
memory: "512Mi"
cpu: "300m"
limits:
memory: "2Gi"
cpu: "1000m"
volumes:
- name: app-logs
persistentVolumeClaim:
claimName: app-logs-pvc
restartPolicy: Always
---
# EveAI App Service
apiVersion: v1
kind: Service
metadata:
name: eveai-app-service
namespace: eveai-dev
labels:
app: eveai-app
spec:
type: NodePort
ports:
- port: 5001
targetPort: 5001
nodePort: 30001 # Maps to host port 3001
protocol: TCP
selector:
app: eveai-app
---
# EveAI API Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: eveai-api
namespace: eveai-dev
labels:
app: eveai-api
environment: dev
spec:
replicas: 1
selector:
matchLabels:
app: eveai-api
template:
metadata:
labels:
app: eveai-api
spec:
containers:
- name: eveai-api
image: registry.ask-eve-ai-local.com/josakola/eveai_api:latest
ports:
- containerPort: 5003
- containerPort: 8000
env:
- name: COMPONENT_NAME
value: "eveai_api"
envFrom:
- configMapRef:
name: eveai-config
- secretRef:
name: eveai-secrets
volumeMounts:
- name: app-logs
mountPath: /app/logs
livenessProbe:
httpGet:
path: /healthz/ready
port: 5003
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
readinessProbe:
httpGet:
path: /healthz/ready
port: 5003
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 10
failureThreshold: 3
resources:
requests:
memory: "512Mi"
cpu: "300m"
limits:
memory: "2Gi"
cpu: "1000m"
volumes:
- name: app-logs
persistentVolumeClaim:
claimName: app-logs-pvc
restartPolicy: Always
---
# EveAI API Service
apiVersion: v1
kind: Service
metadata:
name: eveai-api-service
namespace: eveai-dev
labels:
app: eveai-api
spec:
type: NodePort
ports:
- port: 5003
targetPort: 5003
nodePort: 30003 # Maps to host port 3003
protocol: TCP
selector:
app: eveai-api
---
# EveAI Chat Client Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: eveai-chat-client
namespace: eveai-dev
labels:
app: eveai-chat-client
environment: dev
spec:
replicas: 1
selector:
matchLabels:
app: eveai-chat-client
template:
metadata:
labels:
app: eveai-chat-client
spec:
containers:
- name: eveai-chat-client
image: registry.ask-eve-ai-local.com/josakola/eveai_chat_client:latest
ports:
- containerPort: 5004
- containerPort: 8000
env:
- name: COMPONENT_NAME
value: "eveai_chat_client"
envFrom:
- configMapRef:
name: eveai-config
- secretRef:
name: eveai-secrets
volumeMounts:
- name: app-logs
mountPath: /app/logs
livenessProbe:
httpGet:
path: /healthz/ready
port: 5004
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
readinessProbe:
httpGet:
path: /healthz/ready
port: 5004
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 10
failureThreshold: 3
resources:
requests:
memory: "512Mi"
cpu: "300m"
limits:
memory: "2Gi"
cpu: "1000m"
volumes:
- name: app-logs
persistentVolumeClaim:
claimName: app-logs-pvc
restartPolicy: Always
---
# EveAI Chat Client Service
apiVersion: v1
kind: Service
metadata:
name: eveai-chat-client-service
namespace: eveai-dev
labels:
app: eveai-chat-client
spec:
type: NodePort
ports:
- port: 5004
targetPort: 5004
nodePort: 30004 # Maps to host port 3004
protocol: TCP
selector:
app: eveai-chat-client
---
# EveAI Workers Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: eveai-workers
namespace: eveai-dev
labels:
app: eveai-workers
environment: dev
spec:
replicas: 2 # Multiple workers for parallel processing
selector:
matchLabels:
app: eveai-workers
template:
metadata:
labels:
app: eveai-workers
spec:
containers:
- name: eveai-workers
image: registry.ask-eve-ai-local.com/josakola/eveai_workers:latest
ports:
- containerPort: 8000
env:
- name: COMPONENT_NAME
value: "eveai_workers"
envFrom:
- configMapRef:
name: eveai-config
- secretRef:
name: eveai-secrets
volumeMounts:
- name: app-logs
mountPath: /app/logs
resources:
requests:
memory: "512Mi"
cpu: "300m"
limits:
memory: "2Gi"
cpu: "1000m"
volumes:
- name: app-logs
persistentVolumeClaim:
claimName: app-logs-pvc
restartPolicy: Always
---
# EveAI Chat Workers Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: eveai-chat-workers
namespace: eveai-dev
labels:
app: eveai-chat-workers
environment: dev
spec:
replicas: 2 # Multiple workers for parallel processing
selector:
matchLabels:
app: eveai-chat-workers
template:
metadata:
labels:
app: eveai-chat-workers
spec:
containers:
- name: eveai-chat-workers
image: registry.ask-eve-ai-local.com/josakola/eveai_chat_workers:latest
ports:
- containerPort: 8000
env:
- name: COMPONENT_NAME
value: "eveai_chat_workers"
envFrom:
- configMapRef:
name: eveai-config
- secretRef:
name: eveai-secrets
volumeMounts:
- name: app-logs
mountPath: /app/logs
resources:
requests:
memory: "512Mi"
cpu: "300m"
limits:
memory: "2Gi"
cpu: "1000m"
volumes:
- name: app-logs
persistentVolumeClaim:
claimName: app-logs-pvc
restartPolicy: Always
---
# EveAI Beat Deployment (Celery scheduler)
apiVersion: apps/v1
kind: Deployment
metadata:
name: eveai-beat
namespace: eveai-dev
labels:
app: eveai-beat
environment: dev
spec:
replicas: 1 # Only one beat scheduler needed
selector:
matchLabels:
app: eveai-beat
template:
metadata:
labels:
app: eveai-beat
spec:
containers:
- name: eveai-beat
image: registry.ask-eve-ai-local.com/josakola/eveai_beat:latest
env:
- name: COMPONENT_NAME
value: "eveai_beat"
envFrom:
- configMapRef:
name: eveai-config
- secretRef:
name: eveai-secrets
volumeMounts:
- name: app-logs
mountPath: /app/logs
resources:
requests:
memory: "256Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "500m"
volumes:
- name: app-logs
persistentVolumeClaim:
claimName: app-logs-pvc
restartPolicy: Always
---
# EveAI Entitlements Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: eveai-entitlements
namespace: eveai-dev
labels:
app: eveai-entitlements
environment: dev
spec:
replicas: 1
selector:
matchLabels:
app: eveai-entitlements
template:
metadata:
labels:
app: eveai-entitlements
spec:
containers:
- name: eveai-entitlements
image: registry.ask-eve-ai-local.com/josakola/eveai_entitlements:latest
ports:
- containerPort: 8000
env:
- name: COMPONENT_NAME
value: "eveai_entitlements"
envFrom:
- configMapRef:
name: eveai-config
- secretRef:
name: eveai-secrets
volumeMounts:
- name: app-logs
mountPath: /app/logs
resources:
requests:
memory: "256Mi"
cpu: "200m"
limits:
memory: "1Gi"
cpu: "500m"
volumes:
- name: app-logs
persistentVolumeClaim:
claimName: app-logs-pvc
restartPolicy: Always
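
Each NodePort above (30001, 30003, 30004) only reaches the host because the Kind config maps it to the matching 3xxx host port; a quick check once the pods are up (a sketch assuming kubectl and curl on the host):

# List the assigned nodePorts for the namespace
kubectl get svc -n eveai-dev -o custom-columns='NAME:.metadata.name,NODEPORT:.spec.ports[0].nodePort'
# Hit a readiness endpoint through the host mapping (30001 -> 3001)
curl -s http://localhost:3001/healthz/ready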

108
k8s/dev/kind-dev-cluster.yaml Normal file

@@ -0,0 +1,108 @@
# Kind configuration for EveAI Dev Environment
# File: kind-dev-cluster.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: eveai-dev-cluster
networking:
# API server configuration
apiServerAddress: "127.0.0.1"
apiServerPort: 3000
# Pod subnet (avoid conflicts with host network)
podSubnet: "10.244.0.0/16"
serviceSubnet: "10.96.0.0/12"
nodes:
- role: control-plane
# Extra port mappings to the host (minty), following the 3000-3999 port schema
extraPortMappings:
# Nginx - Main entry point
- containerPort: 80
hostPort: 3080
protocol: TCP
- containerPort: 443
hostPort: 3443
protocol: TCP
# EveAI App
- containerPort: 30001
hostPort: 3001
protocol: TCP
# EveAI API
- containerPort: 30003
hostPort: 3003
protocol: TCP
# EveAI Chat Client
- containerPort: 30004
hostPort: 3004
protocol: TCP
# Redis
- containerPort: 30006
hostPort: 3006
protocol: TCP
# Flower (Celery monitoring)
- containerPort: 30007
hostPort: 3007
protocol: TCP
# MinIO S3 API
- containerPort: 30008
hostPort: 3008
protocol: TCP
# MinIO Console
- containerPort: 30009
hostPort: 3009
protocol: TCP
# Prometheus
- containerPort: 30010
hostPort: 3010
protocol: TCP
# Pushgateway
- containerPort: 30011
hostPort: 3011
protocol: TCP
# Grafana
- containerPort: 30012
hostPort: 3012
protocol: TCP
# Mount points for persistent data on host
extraMounts:
# MinIO data persistence
- hostPath: $HOME/k8s-data/dev/minio
containerPath: /mnt/minio-data
# Redis data persistence
- hostPath: $HOME/k8s-data/dev/redis
containerPath: /mnt/redis-data
# Application logs
- hostPath: $HOME/k8s-data/dev/logs
containerPath: /mnt/app-logs
# Prometheus data
- hostPath: $HOME/k8s-data/dev/prometheus
containerPath: /mnt/prometheus-data
# Grafana data
- hostPath: $HOME/k8s-data/dev/grafana
containerPath: /mnt/grafana-data
# mkcert CA certificate
- hostPath: $HOME/k8s-data/dev/certs
containerPath: /usr/local/share/ca-certificates
# Configure registry access
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.ask-eve-ai-local.com"]
endpoint = ["https://registry.ask-eve-ai-local.com"]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.configs."registry.ask-eve-ai-local.com".tls]
ca_file = "/usr/local/share/ca-certificates/mkcert-ca.crt"
insecure_skip_verify = false
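
Note that kind does not expand environment variables in its config, so the $HOME paths in extraMounts must be substituted before cluster creation, which is exactly what setup-dev-cluster.sh does via envsubst. The manual equivalent:

envsubst < kind-dev-cluster.yaml > /tmp/kind-dev-cluster.expanded.yaml
KIND_EXPERIMENTAL_PROVIDER=podman kind create cluster --name eveai-dev-cluster --config /tmp/kind-dev-cluster.expanded.yaml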

419
k8s/dev/nginx-monitoring-services.yaml Normal file

@@ -0,0 +1,419 @@
# Nginx and Monitoring Services for EveAI Dev Environment
# File: nginx-monitoring-services.yaml
---
# Nginx Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
namespace: eveai-dev
labels:
app: nginx
environment: dev
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: registry.ask-eve-ai-local.com/josakola/nginx:latest
ports:
- containerPort: 80
- containerPort: 443
envFrom:
- configMapRef:
name: eveai-config
- secretRef:
name: eveai-secrets
volumeMounts:
- name: nginx-logs
mountPath: /var/log/nginx
livenessProbe:
httpGet:
path: /
port: 80
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /
port: 80
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 5
failureThreshold: 3
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "500m"
volumes:
- name: nginx-logs
persistentVolumeClaim:
claimName: app-logs-pvc
restartPolicy: Always
---
# Nginx Service
apiVersion: v1
kind: Service
metadata:
name: nginx-service
namespace: eveai-dev
labels:
app: nginx
spec:
type: NodePort
ports:
- port: 80
targetPort: 80
nodePort: 30080 # Maps to host port 3080
protocol: TCP
name: http
- port: 443
targetPort: 443
nodePort: 30443 # Maps to host port 3443
protocol: TCP
name: https
selector:
app: nginx
---
# Flower (Celery Monitoring) Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: flower
namespace: eveai-dev
labels:
app: flower
environment: dev
spec:
replicas: 1
selector:
matchLabels:
app: flower
template:
metadata:
labels:
app: flower
spec:
containers:
- name: flower
image: registry.ask-eve-ai-local.com/josakola/flower:latest
ports:
- containerPort: 5555
envFrom:
- configMapRef:
name: eveai-config
- secretRef:
name: eveai-secrets
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "300m"
restartPolicy: Always
---
# Flower Service
apiVersion: v1
kind: Service
metadata:
name: flower-service
namespace: eveai-dev
labels:
app: flower
spec:
type: NodePort
ports:
- port: 5555
targetPort: 5555
nodePort: 30007 # Maps to host port 3007
protocol: TCP
selector:
app: flower
---
# Prometheus PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: prometheus-data-pvc
namespace: eveai-dev
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-storage
resources:
requests:
storage: 5Gi
selector:
matchLabels:
app: prometheus
environment: dev
---
# Prometheus Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: prometheus
namespace: eveai-dev
labels:
app: prometheus
environment: dev
spec:
replicas: 1
selector:
matchLabels:
app: prometheus
template:
metadata:
labels:
app: prometheus
spec:
containers:
- name: prometheus
image: registry.ask-eve-ai-local.com/josakola/prometheus:latest
ports:
- containerPort: 9090
args:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--web.enable-lifecycle'
volumeMounts:
- name: prometheus-data
mountPath: /prometheus
livenessProbe:
httpGet:
path: /-/healthy
port: 9090
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /-/ready
port: 9090
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 5
failureThreshold: 3
resources:
requests:
memory: "512Mi"
cpu: "300m"
limits:
memory: "2Gi"
cpu: "1000m"
volumes:
- name: prometheus-data
persistentVolumeClaim:
claimName: prometheus-data-pvc
restartPolicy: Always
---
# Prometheus Service
apiVersion: v1
kind: Service
metadata:
name: prometheus-service
namespace: eveai-dev
labels:
app: prometheus
spec:
type: NodePort
ports:
- port: 9090
targetPort: 9090
nodePort: 30010 # Maps to host port 3010
protocol: TCP
selector:
app: prometheus
---
# Pushgateway Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: pushgateway
namespace: eveai-dev
labels:
app: pushgateway
environment: dev
spec:
replicas: 1
selector:
matchLabels:
app: pushgateway
template:
metadata:
labels:
app: pushgateway
spec:
containers:
- name: pushgateway
image: prom/pushgateway:latest
ports:
- containerPort: 9091
livenessProbe:
httpGet:
path: /-/healthy
port: 9091
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /-/ready
port: 9091
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 5
failureThreshold: 3
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "300m"
restartPolicy: Always
---
# Pushgateway Service
apiVersion: v1
kind: Service
metadata:
name: pushgateway-service
namespace: eveai-dev
labels:
app: pushgateway
spec:
type: NodePort
ports:
- port: 9091
targetPort: 9091
nodePort: 30011 # Maps to host port 3011
protocol: TCP
selector:
app: pushgateway
---
# Grafana PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: grafana-data-pvc
namespace: eveai-dev
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-storage
resources:
requests:
storage: 1Gi
selector:
matchLabels:
app: grafana
environment: dev
---
# Grafana Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: grafana
namespace: eveai-dev
labels:
app: grafana
environment: dev
spec:
replicas: 1
selector:
matchLabels:
app: grafana
template:
metadata:
labels:
app: grafana
spec:
containers:
- name: grafana
image: registry.ask-eve-ai-local.com/josakola/grafana:latest
ports:
- containerPort: 3000
env:
- name: GF_SECURITY_ADMIN_USER
value: "admin"
- name: GF_SECURITY_ADMIN_PASSWORD
value: "admin"
- name: GF_USERS_ALLOW_SIGN_UP
value: "false"
volumeMounts:
- name: grafana-data
mountPath: /var/lib/grafana
livenessProbe:
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 5
failureThreshold: 3
resources:
requests:
memory: "256Mi"
cpu: "200m"
limits:
memory: "1Gi"
cpu: "500m"
volumes:
- name: grafana-data
persistentVolumeClaim:
claimName: grafana-data-pvc
restartPolicy: Always
---
# Grafana Service
apiVersion: v1
kind: Service
metadata:
name: grafana-service
namespace: eveai-dev
labels:
app: grafana
spec:
type: NodePort
ports:
- port: 3000
targetPort: 3000
nodePort: 30012 # Maps to host port 3012
protocol: TCP
selector:
app: grafana
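
The monitoring stack exposes the same health endpoints its probes use, so a post-deploy spot check from the host is straightforward, assuming the port mappings from the Kind config:

curl -s http://localhost:3010/-/healthy      # Prometheus liveness
curl -s http://localhost:3011/-/ready        # Pushgateway readiness
curl -s http://localhost:3012/api/health     # Grafana health (default admin/admin per this manifest)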

145
k8s/dev/persistent-volumes.yaml Normal file

@@ -0,0 +1,145 @@
# Persistent Volumes for EveAI Dev Environment
# File: persistent-volumes.yaml
---
# MinIO Data Storage
apiVersion: v1
kind: PersistentVolume
metadata:
name: minio-data-pv
labels:
app: minio
environment: dev
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
local:
path: /mnt/minio-data
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- eveai-dev-cluster-control-plane
---
# Redis Data Storage
apiVersion: v1
kind: PersistentVolume
metadata:
name: redis-data-pv
labels:
app: redis
environment: dev
spec:
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
local:
path: /mnt/redis-data
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- eveai-dev-cluster-control-plane
---
# Application Logs Storage
apiVersion: v1
kind: PersistentVolume
metadata:
name: app-logs-pv
labels:
app: eveai
environment: dev
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
local:
path: /mnt/app-logs
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- eveai-dev-cluster-control-plane
---
# Prometheus Data Storage
apiVersion: v1
kind: PersistentVolume
metadata:
name: prometheus-data-pv
labels:
app: prometheus
environment: dev
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
local:
path: /mnt/prometheus-data
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- eveai-dev-cluster-control-plane
---
# Grafana Data Storage
apiVersion: v1
kind: PersistentVolume
metadata:
name: grafana-data-pv
labels:
app: grafana
environment: dev
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
local:
path: /mnt/grafana-data
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- eveai-dev-cluster-control-plane
---
# StorageClass for local storage
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
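
Because the StorageClass uses volumeBindingMode: WaitForFirstConsumer, the pre-created local PVs stay Available and the PVCs stay Pending until the first consuming pod is scheduled; that is expected, not an error. To watch binding happen:

kubectl get pv                      # Available until claimed
kubectl get pvc -n eveai-dev -w     # Pending -> Bound once a consuming pod lands on the node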

238
k8s/dev/redis-minio-services.yaml Normal file

@@ -0,0 +1,238 @@
# Redis and MinIO Services for EveAI Dev Environment
# File: redis-minio-services.yaml
---
# Redis Persistent Volume Claim
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: redis-data-pvc
namespace: eveai-dev
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-storage
resources:
requests:
storage: 2Gi
selector:
matchLabels:
app: redis
environment: dev
---
# Redis Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis
namespace: eveai-dev
labels:
app: redis
environment: dev
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- name: redis
image: redis:7.2.5
ports:
- containerPort: 6379
volumeMounts:
- name: redis-data
mountPath: /data
livenessProbe:
exec:
command:
- redis-cli
- ping
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
exec:
command:
- redis-cli
- ping
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "500m"
volumes:
- name: redis-data
persistentVolumeClaim:
claimName: redis-data-pvc
restartPolicy: Always
---
# Redis Service
apiVersion: v1
kind: Service
metadata:
name: redis-service
namespace: eveai-dev
labels:
app: redis
spec:
type: NodePort
ports:
- port: 6379
targetPort: 6379
nodePort: 30006 # Maps to host port 3006
protocol: TCP
selector:
app: redis
---
# MinIO Persistent Volume Claim
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: minio-data-pvc
namespace: eveai-dev
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-storage
resources:
requests:
storage: 10Gi
selector:
matchLabels:
app: minio
environment: dev
---
# MinIO Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: minio
namespace: eveai-dev
labels:
app: minio
environment: dev
spec:
replicas: 1
selector:
matchLabels:
app: minio
template:
metadata:
labels:
app: minio
spec:
containers:
- name: minio
image: minio/minio
command:
- minio
- server
- /data
- --console-address
- ":9001"
ports:
- containerPort: 9000
name: api
- containerPort: 9001
name: console
env:
- name: MINIO_ROOT_USER
valueFrom:
configMapKeyRef:
name: eveai-config
key: MINIO_ACCESS_KEY
- name: MINIO_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: eveai-secrets
key: MINIO_SECRET_KEY
volumeMounts:
- name: minio-data
mountPath: /data
livenessProbe:
httpGet:
path: /minio/health/live
port: 9000
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 10
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /minio/health/ready
port: 9000
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
successThreshold: 1
failureThreshold: 3
resources:
requests:
memory: "256Mi"
cpu: "200m"
limits:
memory: "1Gi"
cpu: "1000m"
volumes:
- name: minio-data
persistentVolumeClaim:
claimName: minio-data-pvc
restartPolicy: Always
---
# MinIO Service (API)
apiVersion: v1
kind: Service
metadata:
name: minio-service
namespace: eveai-dev
labels:
app: minio
spec:
type: NodePort
ports:
- port: 9000
targetPort: 9000
nodePort: 30008 # Maps to host port 3008
protocol: TCP
name: api
selector:
app: minio
---
# MinIO Console Service
apiVersion: v1
kind: Service
metadata:
name: minio-console-service
namespace: eveai-dev
labels:
app: minio
spec:
type: NodePort
ports:
- port: 9001
targetPort: 9001
nodePort: 30009 # Maps to host port 3009
protocol: TCP
name: console
selector:
app: minio
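
Both services can be verified with the same checks their probes use; a minimal sketch, assuming the deployments above are running:

kubectl exec -n eveai-dev deploy/redis -- redis-cli ping    # expect: PONG
curl -s http://localhost:3008/minio/health/ready            # MinIO readiness via host port 3008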

228
k8s/dev/setup-dev-cluster.sh Executable file

@@ -0,0 +1,228 @@
#!/bin/bash
# Setup script for the EveAI Dev Kind Cluster
# File: setup-dev-cluster.sh
set -e
CLUSTER_NAME="eveai-dev-cluster"
echo "🚀 Setting up EveAI Dev Kind Cluster..."
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Helper functions for colored output
print_status() {
echo -e "${BLUE}[INFO]${NC} $1"
}
print_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Check if required tools are installed
check_prerequisites() {
print_status "Checking prerequisites..."
if ! command -v kind &> /dev/null; then
print_error "kind is not installed. Please install kind first."
echo "Install via: go install sigs.k8s.io/kind@latest"
exit 1
fi
if ! command -v kubectl &> /dev/null; then
print_error "kubectl is not installed. Please install kubectl first."
exit 1
fi
if ! command -v podman &> /dev/null; then
print_error "podman is not installed. Please install podman first."
exit 1
fi
if ! command -v envsubst &> /dev/null; then
print_error "envsubst is not installed. Please install envsubst first."
exit 1
fi
print_success "All prerequisites are installed"
}
# Create host directories for persistent volumes
create_host_directories() {
print_status "Creating host directories for persistent storage..."
BASE_DIR="$HOME/k8s-data/dev"
directories=(
"$BASE_DIR/minio"
"$BASE_DIR/redis"
"$BASE_DIR/logs"
"$BASE_DIR/prometheus"
"$BASE_DIR/grafana"
"$BASE_DIR/certs"
)
for dir in "${directories[@]}"; do
if [ ! -d "$dir" ]; then
mkdir -p "$dir"
print_status "Created directory: $dir"
else
print_status "Directory already exists: $dir"
fi
done
# Set proper permissions
chmod -R 755 "$BASE_DIR"
print_success "Host directories created and configured"
}
# Create Kind cluster
create_cluster() {
print_status "Creating Kind cluster..."
if kind get clusters | grep -q "eveai-dev-cluster"; then
print_warning "Cluster 'eveai-dev-cluster' already exists"
echo -n "Do you want to delete and recreate it? (y/N): "
read -r response
if [[ "$response" =~ ^[Yy]$ ]]; then
print_status "Deleting existing cluster..."
kind delete cluster --name eveai-dev-cluster
else
print_status "Using existing cluster"
return 0
fi
fi
KIND_CONFIG="kind-dev-cluster.yaml"
if [ ! -f "${KIND_CONFIG}" ]; then
print_error "Config '${KIND_CONFIG}' niet gevonden in $(pwd)"
exit 1
fi
print_status "Creating new Kind cluster with configuration..."
# Generate the expanded config with envsubst (kind itself does not expand $HOME in extraMounts)
EXPANDED_CONFIG="$(mktemp --suffix=.yaml)"
envsubst < "${KIND_CONFIG}" > "${EXPANDED_CONFIG}"
# Preferred method: start in a user scope with explicit cgroup delegation
if command -v systemd-run >/dev/null 2>&1; then
systemd-run --scope --user -p "Delegate=yes" \
env KIND_EXPERIMENTAL_PROVIDER=podman \
kind create cluster --name "${CLUSTER_NAME}" --config "${EXPANDED_CONFIG}"
else
# Fallback without a systemd-run scope
print_warning "Starting without a systemd-run scope; this may fail if cgroup delegation is missing."
KIND_EXPERIMENTAL_PROVIDER=podman kind create cluster --name "${CLUSTER_NAME}" --config "${EXPANDED_CONFIG}"
fi
# Cleanup temporary config
rm -f "${EXPANDED_CONFIG}"
# Wait for cluster to be ready
print_status "Waiting for cluster to be ready..."
kubectl wait --for=condition=Ready nodes --all --timeout=300s
# Update CA certificates in Kind node
print_status "Updating CA certificates in cluster..."
podman exec "${CLUSTER_NAME}-control-plane" update-ca-certificates
podman exec "${CLUSTER_NAME}-control-plane" systemctl restart containerd
print_success "Kind cluster created successfully"
}
# Apply Kubernetes manifests
apply_manifests() {
print_status "Applying Kubernetes manifests..."
# Apply in correct order
manifests=(
"persistent-volumes.yaml"
"config-secrets.yaml"
)
for manifest in "${manifests[@]}"; do
if [ -f "$manifest" ]; then
print_status "Applying $manifest..."
kubectl apply -f "$manifest"
else
print_warning "Manifest $manifest not found, skipping..."
fi
done
print_success "Base manifests applied successfully"
}
# Verify cluster status
verify_cluster() {
print_status "Verifying cluster status..."
# Check nodes
print_status "Cluster nodes:"
kubectl get nodes
# Check namespaces
print_status "Namespaces:"
kubectl get namespaces
# Check persistent volumes
print_status "Persistent volumes:"
kubectl get pv
# Check if registry is accessible from cluster
print_status "Testing registry connectivity..."
# Note: --dry-run=server only validates the pod spec server-side; no pod is created and no image is pulled
if kubectl run test-registry --image=registry.ask-eve-ai-local.com/josakola/nginx:latest --dry-run=server &> /dev/null; then
print_success "Registry image spec accepted by the API server"
else
print_warning "Registry connectivity test failed - this might be expected if images aren't pushed yet"
fi
}
# Main execution
main() {
echo "=================================================="
echo "🏗️ EveAI Dev Kind Cluster Setup"
echo "=================================================="
check_prerequisites
create_host_directories
create_cluster
apply_manifests
verify_cluster
echo ""
echo "=================================================="
print_success "EveAI Dev Kind Cluster setup completed!"
echo "=================================================="
echo ""
echo "📋 Next steps:"
echo "1. Deploy your application services using the service manifests"
echo "2. Configure DNS entries for local development"
echo "3. Access services via the mapped ports (3000-3999 range)"
echo ""
echo "🔧 Useful commands:"
echo " kubectl config current-context # Verify you're using the right cluster"
echo " kubectl get all -n eveai-dev # Check all resources in dev namespace"
echo " kind delete cluster --name eveai-dev-cluster # Delete cluster when done"
echo ""
echo "📊 Port mappings:"
echo " - Nginx: http://minty.ask-eve-ai-local.com:3080"
echo " - EveAI App: http://minty.ask-eve-ai-local.com:3001"
echo " - EveAI API: http://minty.ask-eve-ai-local.com:3003"
echo " - Chat Client: http://minty.ask-eve-ai-local.com:3004"
echo " - MinIO Console: http://minty.ask-eve-ai-local.com:3009"
echo " - Grafana: http://minty.ask-eve-ai-local.com:3012"
}
# Run main function
main "$@"

11
scripts/sync_evie_to_minty.sh Executable file

@@ -0,0 +1,11 @@
#!/usr/bin/env bash
set -euo pipefail
rsync -avzm --delete \
--include='*/' \
--include='*.sh' \
--include='*.yaml' \
--exclude='*' \
../docker/ minty:/home/pieter/bin/evie/docker/
rsync -avz --delete ../k8s/ minty:/home/pieter/bin/evie/k8s/
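
rsync evaluates filter rules in order: '*/' keeps the directory tree traversable, the '*.sh' and '*.yaml' includes whitelist scripts and manifests, and the final --exclude='*' drops everything else, with -m pruning directories that end up empty. A dry run (-n) is a safe way to preview what --delete will remove on minty:

rsync -avzmn --delete --include='*/' --include='*.sh' --include='*.yaml' --exclude='*' ../docker/ minty:/home/pieter/bin/evie/docker/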