- Preliminary (working) setup up to and including the verification service, Bunny integration, ...
7
scaleway/configs/production.conf
Normal file
@@ -0,0 +1,7 @@
# Scaleway Production Environment Configuration (for later)
SCALEWAY_REGISTRY="rg.fr-par.scw.cloud/eveai-production"
SCALEWAY_API_KEY="your_production_api_key_here"
SCALEWAY_REGION="fr-par"
SCALEWAY_PROJECT_ID="production_project_id"
K8S_CLUSTER_NAME="eveai-production-cluster"
K8S_CONTEXT="scaleway-production"
7
scaleway/configs/staging.conf
Normal file
@@ -0,0 +1,7 @@
# Scaleway Staging Environment Configuration
SCALEWAY_REGISTRY="rg.fr-par.scw.cloud/eveai-staging"
SCALEWAY_API_KEY="7cd4b2c4-448b-4a88-9119-0eb7f192c137"
SCALEWAY_REGION="fr-par"
SCALEWAY_PROJECT_ID="ad7d2ed9-252b-4b2a-9f4c-daca3edc4c4b"
K8S_CLUSTER_NAME="eveai-staging"
K8S_CONTEXT="eveai-staging"
88
scaleway/manifests/staging/cert-manager-setup.yaml
Normal file
@@ -0,0 +1,88 @@
|
||||
# cert-manager-setup.yaml
|
||||
# Install cert-manager for automatic SSL certificate management
|
||||
|
||||
# Install cert-manager CRDs first
|
||||
# kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.crds.yaml
|
||||
|
||||
# cert-manager namespace
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: cert-manager
|
||||
|
||||
---
|
||||
# ClusterIssuer for Let's Encrypt staging (test first)
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-staging
|
||||
spec:
|
||||
acme:
|
||||
server: https://acme-staging-v02.api.letsencrypt.org/directory
|
||||
email: pieter@askeveai.com # CHANGE THIS
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-staging
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: nginx
|
||||
|
||||
---
|
||||
# ClusterIssuer for Let's Encrypt production
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-prod
|
||||
spec:
|
||||
acme:
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
email: pieter@askeveai.com # CHANGE THIS
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-prod
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: nginx
|
||||
|
||||
---
|
||||
# Updated ingress with TLS configuration
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: eveai-staging-ingress-https
|
||||
namespace: eveai-staging
|
||||
labels:
|
||||
app: eveai
|
||||
environment: staging
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: nginx
|
||||
nginx.ingress.kubernetes.io/ssl-redirect: "true"
|
||||
nginx.ingress.kubernetes.io/proxy-body-size: "10m"
|
||||
nginx.ingress.kubernetes.io/proxy-read-timeout: "300"
|
||||
# Use staging issuer first for testing
|
||||
cert-manager.io/cluster-issuer: letsencrypt-staging
|
||||
# After verification, switch to: letsencrypt-prod
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
tls:
|
||||
- hosts:
|
||||
- evie-staging.askeveai.com
|
||||
secretName: evie-staging-tls
|
||||
rules:
|
||||
- host: evie-staging.askeveai.com
|
||||
http:
|
||||
paths:
|
||||
- path: /verify
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: verify-service
|
||||
port:
|
||||
number: 80
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: verify-service
|
||||
port:
|
||||
number: 80
|
||||
285
scaleway/manifests/staging/ingress-controller-setup.yaml
Normal file
@@ -0,0 +1,285 @@
|
||||
# ingress-controller-setup.yaml
|
||||
# NGINX Ingress Controller for use with an external LoadBalancer
|
||||
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
|
||||
---
|
||||
# Ingress Controller Deployment
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: ingress-nginx-controller
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
spec:
|
||||
serviceAccountName: ingress-nginx
|
||||
containers:
|
||||
- name: controller
|
||||
image: registry.k8s.io/ingress-nginx/controller:v1.8.2
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /wait-shutdown
|
||||
args:
|
||||
- /nginx-ingress-controller
|
||||
- --election-id=ingress-controller-leader
|
||||
- --controller-class=k8s.io/ingress-nginx
|
||||
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
|
||||
- --validating-webhook=:8443
|
||||
- --validating-webhook-certificate=/usr/local/certificates/cert
|
||||
- --validating-webhook-key=/usr/local/certificates/key
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
runAsUser: 101
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: LD_PRELOAD
|
||||
value: /usr/local/lib/libmimalloc.so
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
protocol: TCP
|
||||
- name: https
|
||||
containerPort: 443
|
||||
protocol: TCP
|
||||
- name: webhook
|
||||
containerPort: 8443
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 5
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10254
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
readinessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10254
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 90Mi
|
||||
volumeMounts:
|
||||
- mountPath: /usr/local/certificates/
|
||||
name: webhook-cert
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: webhook-cert
|
||||
secret:
|
||||
secretName: ingress-nginx-admission
|
||||
|
||||
---
|
||||
# NodePort Service - this is what your LoadBalancer points to!
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: ingress-nginx-controller
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
protocol: TCP
|
||||
name: http
|
||||
nodePort: 30080 # The external LoadBalancer points to this port on each node
|
||||
- port: 443
|
||||
targetPort: 443
|
||||
protocol: TCP
|
||||
name: https
|
||||
nodePort: 30443 # For HTTPS traffic
|
||||
selector:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
|
||||
---
|
||||
# ServiceAccount and RBAC
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps", "endpoints", "nodes", "pods", "secrets", "namespaces"]
|
||||
verbs: ["list", "watch"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
resourceNames: ["ingress-nginx-controller"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch"]
|
||||
- apiGroups: ["networking.k8s.io"]
|
||||
resources: ["ingresses/status"]
|
||||
verbs: ["update"]
|
||||
- apiGroups: ["networking.k8s.io"]
|
||||
resources: ["ingressclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["discovery.k8s.io"]
|
||||
resources: ["endpointslices"]
|
||||
verbs: ["list", "watch", "get"]
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["namespaces"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps", "pods", "secrets", "endpoints"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["networking.k8s.io"]
|
||||
resources: ["ingresses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["networking.k8s.io"]
|
||||
resources: ["ingressclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
resourceNames: ["ingress-controller-leader"]
|
||||
verbs: ["get", "update"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["create"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["create", "patch"]
|
||||
- apiGroups: ["discovery.k8s.io"]
|
||||
resources: ["endpointslices"]
|
||||
verbs: ["list", "watch", "get"]
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: ingress-nginx
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: ingress-nginx
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
|
||||
---
|
||||
# ConfigMap for the Ingress Controller configuration
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: ingress-nginx-controller
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
data:
|
||||
allow-snippet-annotations: "true"
|
||||
|
||||
---
|
||||
# IngressClass definition
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: IngressClass
|
||||
metadata:
|
||||
name: nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
spec:
|
||||
controller: k8s.io/ingress-nginx
|
||||
502
scaleway/manifests/staging/staging-test-setup.yaml
Normal file
@@ -0,0 +1,502 @@
|
||||
# staging-test-setup.yaml
|
||||
# Complete test and debug setup for EveAI staging environment
|
||||
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: eveai-staging
|
||||
labels:
|
||||
environment: staging
|
||||
app: eveai
|
||||
|
||||
---
|
||||
# ConfigMap with HTML content for the test interface
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: verify-content
|
||||
namespace: eveai-staging
|
||||
data:
|
||||
index.html: |
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>EveAI Staging - System Verification</title>
|
||||
<style>
|
||||
* { box-sizing: border-box; margin: 0; padding: 0; }
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
|
||||
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
|
||||
min-height: 100vh;
|
||||
padding: 20px;
|
||||
}
|
||||
.container {
|
||||
max-width: 1200px;
|
||||
margin: 0 auto;
|
||||
background: white;
|
||||
border-radius: 12px;
|
||||
box-shadow: 0 20px 40px rgba(0,0,0,0.1);
|
||||
overflow: hidden;
|
||||
}
|
||||
.header {
|
||||
background: #2d3748;
|
||||
color: white;
|
||||
padding: 30px;
|
||||
text-align: center;
|
||||
}
|
||||
.header h1 { font-size: 2.5em; margin-bottom: 10px; }
|
||||
.subtitle { opacity: 0.8; font-size: 1.1em; }
|
||||
.content { padding: 40px; }
|
||||
.status-grid {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
|
||||
gap: 20px;
|
||||
margin-bottom: 40px;
|
||||
}
|
||||
.status-card {
|
||||
border: 1px solid #e2e8f0;
|
||||
border-radius: 8px;
|
||||
padding: 20px;
|
||||
background: #f8fafc;
|
||||
}
|
||||
.status-card h3 {
|
||||
color: #2d3748;
|
||||
margin-bottom: 15px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
.status-indicator {
|
||||
width: 12px;
|
||||
height: 12px;
|
||||
border-radius: 50%;
|
||||
margin-right: 10px;
|
||||
}
|
||||
.healthy { background: #48bb78; }
|
||||
.warning { background: #ed8936; }
|
||||
.info { background: #4299e1; }
|
||||
.debug-section {
|
||||
background: #1a202c;
|
||||
color: #e2e8f0;
|
||||
border-radius: 8px;
|
||||
padding: 30px;
|
||||
margin-top: 30px;
|
||||
}
|
||||
.debug-section h3 { color: #90cdf4; margin-bottom: 20px; }
|
||||
.endpoint {
|
||||
background: #2d3748;
|
||||
border-radius: 4px;
|
||||
padding: 15px;
|
||||
margin: 10px 0;
|
||||
font-family: 'Courier New', monospace;
|
||||
border-left: 4px solid #4299e1;
|
||||
}
|
||||
.endpoint a { color: #90cdf4; text-decoration: none; }
|
||||
.endpoint a:hover { text-decoration: underline; }
|
||||
.tools {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
|
||||
gap: 15px;
|
||||
margin-top: 20px;
|
||||
}
|
||||
.tool-button {
|
||||
background: #4299e1;
|
||||
color: white;
|
||||
border: none;
|
||||
padding: 12px 20px;
|
||||
border-radius: 6px;
|
||||
cursor: pointer;
|
||||
font-size: 14px;
|
||||
}
|
||||
.tool-button:hover { background: #3182ce; }
|
||||
.info-table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
margin-top: 15px;
|
||||
}
|
||||
.info-table th, .info-table td {
|
||||
text-align: left;
|
||||
padding: 8px 12px;
|
||||
border-bottom: 1px solid #e2e8f0;
|
||||
}
|
||||
.info-table th { background: #f7fafc; font-weight: 600; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="container">
|
||||
<div class="header">
|
||||
<h1>EveAI Staging Environment</h1>
|
||||
<div class="subtitle">System Verification & Debug Console</div>
|
||||
</div>
|
||||
|
||||
<div class="content">
|
||||
<div class="status-grid">
|
||||
<div class="status-card">
|
||||
<h3><span class="status-indicator healthy"></span>Cluster Status</h3>
|
||||
<p><strong>Environment:</strong> Staging</p>
|
||||
<p><strong>Ingress:</strong> NGINX</p>
|
||||
<p><strong>LoadBalancer:</strong> Scaleway (Automatic)</p>
|
||||
<p><strong>CDN:</strong> Bunny.net (Planned)</p>
|
||||
</div>
|
||||
|
||||
<div class="status-card">
|
||||
<h3><span class="status-indicator healthy"></span>Network</h3>
|
||||
<p><strong>Host:</strong> <span id="hostname">Loading...</span></p>
|
||||
<p><strong>IP:</strong> <span id="clientip">Loading...</span></p>
|
||||
<p><strong>User-Agent:</strong> <span id="useragent">Loading...</span></p>
|
||||
</div>
|
||||
|
||||
<div class="status-card">
|
||||
<h3><span class="status-indicator info"></span>Container Info</h3>
|
||||
<table class="info-table">
|
||||
<tr><th>Pod Name</th><td id="podname">verify-service</td></tr>
|
||||
<tr><th>Namespace</th><td>eveai-staging</td></tr>
|
||||
<tr><th>Container</th><td>nginx:alpine</td></tr>
|
||||
<tr><th>Path</th><td>/verify</td></tr>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<div class="status-card">
|
||||
<h3><span class="status-indicator warning"></span>Planned Services</h3>
|
||||
<p><strong>/admin</strong> - Admin interface (Not deployed)</p>
|
||||
<p><strong>/api</strong> - Backend API (Not deployed)</p>
|
||||
<p><strong>/client</strong> - Frontend app (Not deployed)</p>
|
||||
<p><strong>/verify</strong> - This debug service ✓</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="debug-section">
|
||||
<h3>Debug & Health Endpoints</h3>
|
||||
|
||||
<div class="endpoint">
|
||||
<strong>Health Check:</strong>
|
||||
<a href="/verify/health">/verify/health</a> - Basic health status
|
||||
</div>
|
||||
|
||||
<div class="endpoint">
|
||||
<strong>System Info:</strong>
|
||||
<a href="/verify/info">/verify/info</a> - Detailed system information
|
||||
</div>
|
||||
|
||||
<div class="endpoint">
|
||||
<strong>Headers:</strong>
|
||||
<a href="/verify/headers">/verify/headers</a> - Request headers analysis
|
||||
</div>
|
||||
|
||||
<div class="endpoint">
|
||||
<strong>Network Test:</strong>
|
||||
<a href="/verify/network">/verify/network</a> - Network connectivity tests
|
||||
</div>
|
||||
|
||||
<div class="tools">
|
||||
<button class="tool-button" onclick="testHealth()">Test Health</button>
|
||||
<button class="tool-button" onclick="testConnectivity()">Test APIs</button>
|
||||
<button class="tool-button" onclick="showHeaders()">Show Headers</button>
|
||||
<button class="tool-button" onclick="downloadLogs()">Get Logs</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// Populate dynamic content
|
||||
document.getElementById('hostname').textContent = window.location.hostname;
|
||||
document.getElementById('clientip').textContent = 'Detected via headers';
|
||||
document.getElementById('useragent').textContent = navigator.userAgent.substring(0, 50) + '...';
|
||||
|
||||
// Debug functions
|
||||
function testHealth() {
|
||||
fetch('/verify/health')
|
||||
.then(response => response.text())
|
||||
.then(data => alert('Health check: ' + data))
|
||||
.catch(err => alert('Health check failed: ' + err));
|
||||
}
|
||||
|
||||
function testConnectivity() {
|
||||
alert('API connectivity tests would run here when APIs are deployed');
|
||||
}
|
||||
|
||||
function showHeaders() {
|
||||
window.open('/verify/headers', '_blank');
|
||||
}
|
||||
|
||||
function downloadLogs() {
|
||||
alert('Log download feature - would fetch container logs');
|
||||
}
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
health.html: |
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head><title>Health Check</title></head>
|
||||
<body>
|
||||
<h1>Health Status: OK</h1>
|
||||
<p>Timestamp: <script>document.write(new Date().toISOString())</script></p>
|
||||
<p>Service: EveAI Staging Verification</p>
|
||||
<p>Status: All systems operational</p>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
info.html: |
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head><title>System Information</title></head>
|
||||
<body>
|
||||
<h1>System Information</h1>
|
||||
<h2>Environment</h2>
|
||||
<ul>
|
||||
<li>Namespace: eveai-staging</li>
|
||||
<li>Service: verify-service</li>
|
||||
<li>Path: /verify</li>
|
||||
<li>Container: nginx:alpine</li>
|
||||
</ul>
|
||||
<h2>Network</h2>
|
||||
<ul>
|
||||
<li>Ingress Controller: NGINX</li>
|
||||
<li>LoadBalancer: Scaleway Automatic</li>
|
||||
<li>External IP: Via LoadBalancer</li>
|
||||
</ul>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
headers.html: |
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head><title>Request Headers</title></head>
|
||||
<body>
|
||||
<h1>Request Headers Analysis</h1>
|
||||
<p>This page would show detailed request headers in a production implementation.</p>
|
||||
<p>Useful for debugging CDN, LoadBalancer, and ingress header forwarding.</p>
|
||||
<h2>Expected Headers</h2>
|
||||
<ul>
|
||||
<li>Host: evie-staging.askeveai.com</li>
|
||||
<li>X-Forwarded-For: (Client IP)</li>
|
||||
<li>X-Forwarded-Proto: http/https</li>
|
||||
<li>User-Agent: (Browser/Tool)</li>
|
||||
</ul>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
network.html: |
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head><title>Network Tests</title></head>
|
||||
<body>
|
||||
<h1>Network Connectivity Tests</h1>
|
||||
<p>This page would run network connectivity tests:</p>
|
||||
<h2>Internal Tests</h2>
|
||||
<ul>
|
||||
<li>DNS Resolution</li>
|
||||
<li>Service Discovery</li>
|
||||
<li>Database Connectivity (when deployed)</li>
|
||||
</ul>
|
||||
<h2>External Tests</h2>
|
||||
<ul>
|
||||
<li>Internet Connectivity</li>
|
||||
<li>External API Reachability</li>
|
||||
<li>CDN Performance</li>
|
||||
</ul>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
---
|
||||
# Custom nginx configuration
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: verify-nginx-config
|
||||
namespace: eveai-staging
|
||||
data:
|
||||
default.conf: |
|
||||
server {
|
||||
listen 80;
|
||||
server_name _;
|
||||
root /usr/share/nginx/html;
|
||||
index index.html;
|
||||
|
||||
# ACME Challenge support for SSL certificate verification
|
||||
location /.well-known/acme-challenge/ {
|
||||
access_log off;
|
||||
return 200 "acme-challenge-response";
|
||||
add_header Content-Type text/plain;
|
||||
}
|
||||
|
||||
# Health endpoint for ingress controller (root level)
|
||||
location /healthz {
|
||||
access_log off;
|
||||
return 200 "healthy\n";
|
||||
add_header Content-Type text/plain;
|
||||
}
|
||||
|
||||
# Health endpoint
|
||||
location /health {
|
||||
try_files /health.html =404;
|
||||
}
|
||||
|
||||
# Info endpoint
|
||||
location /info {
|
||||
try_files /info.html =404;
|
||||
}
|
||||
|
||||
# Headers analysis
|
||||
location /headers {
|
||||
try_files /headers.html =404;
|
||||
}
|
||||
|
||||
# Network tests
|
||||
location /network {
|
||||
try_files /network.html =404;
|
||||
}
|
||||
|
||||
# Main interface - serve index.html for all other requests
|
||||
location / {
|
||||
try_files $uri $uri/ /index.html;
|
||||
}
|
||||
}
|
||||
|
||||
---
|
||||
# Verification service deployment
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: verify-service
|
||||
namespace: eveai-staging
|
||||
labels:
|
||||
app: verify-service
|
||||
component: verification
|
||||
environment: staging
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: verify-service
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: verify-service
|
||||
component: verification
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:alpine
|
||||
ports:
|
||||
- containerPort: 80
|
||||
volumeMounts:
|
||||
- name: html-content
|
||||
mountPath: /usr/share/nginx/html
|
||||
- name: nginx-config
|
||||
mountPath: /etc/nginx/conf.d
|
||||
resources:
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "50m"
|
||||
limits:
|
||||
memory: "64Mi"
|
||||
cpu: "100m"
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /verify/health
|
||||
port: 80
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /verify/health
|
||||
port: 80
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
volumes:
|
||||
- name: html-content
|
||||
configMap:
|
||||
name: verify-content
|
||||
- name: nginx-config
|
||||
configMap:
|
||||
name: verify-nginx-config
|
||||
|
||||
---
|
||||
# Service for the verification app
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: verify-service
|
||||
namespace: eveai-staging
|
||||
labels:
|
||||
app: verify-service
|
||||
spec:
|
||||
selector:
|
||||
app: verify-service
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
name: http
|
||||
type: ClusterIP
|
||||
|
||||
---
|
||||
# Ingress rules with path-based routing
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: eveai-staging-ingress
|
||||
namespace: eveai-staging
|
||||
labels:
|
||||
app: eveai
|
||||
environment: staging
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: nginx
|
||||
nginx.ingress.kubernetes.io/ssl-redirect: "false"
|
||||
nginx.ingress.kubernetes.io/proxy-body-size: "10m"
|
||||
nginx.ingress.kubernetes.io/proxy-read-timeout: "300"
|
||||
spec:
|
||||
ingressClassName: nginx
|
||||
rules:
|
||||
- host: evie-staging.askeveai.com
|
||||
http:
|
||||
paths:
|
||||
# Verification service paths
|
||||
- path: /verify
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: verify-service
|
||||
port:
|
||||
number: 80
|
||||
|
||||
# Future services (commented out for now)
|
||||
# Admin service (not deployed yet)
|
||||
# - path: /admin
|
||||
# pathType: Prefix
|
||||
# backend:
|
||||
# service:
|
||||
# name: admin-service
|
||||
# port:
|
||||
# number: 80
|
||||
|
||||
# API service (not deployed yet)
|
||||
# - path: /api
|
||||
# pathType: Prefix
|
||||
# backend:
|
||||
# service:
|
||||
# name: api-service
|
||||
# port:
|
||||
# number: 8000
|
||||
|
||||
# Client/Frontend service (not deployed yet)
|
||||
# - path: /client
|
||||
# pathType: Prefix
|
||||
# backend:
|
||||
# service:
|
||||
# name: client-service
|
||||
# port:
|
||||
# number: 3000
|
||||
|
||||
# Default: root path to verification service
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: verify-service
|
||||
port:
|
||||
number: 80
|
||||
277
scaleway/push_to_scaleway.sh
Executable file
@@ -0,0 +1,277 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Exit on any error
|
||||
set -e
|
||||
|
||||
# Function to display usage information
|
||||
usage() {
|
||||
echo "Usage: $0 <version> <environment> [options]"
|
||||
echo " version : Version to push (e.g., v1.2.3, v1.2.3-alpha)"
|
||||
echo " environment : Target environment (staging|production)"
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " --services <service1,service2,...> : Specific services to push (default: all EveAI services)"
|
||||
echo " --dry-run : Show what would be done without executing"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 v1.2.3-alpha staging"
|
||||
echo " $0 v2.0.0 production --services eveai_api,eveai_workers"
|
||||
echo " $0 v1.0.0-beta staging --dry-run"
|
||||
}
|
||||
|
||||
# Check if required arguments are provided
|
||||
if [ $# -lt 2 ]; then
|
||||
echo "❌ Error: Version and environment are required"
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
VERSION=$1
|
||||
ENVIRONMENT=$2
|
||||
shift 2
|
||||
|
||||
# Default values
|
||||
SERVICES=""
|
||||
DRY_RUN=false
|
||||
|
||||
# Parse options
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--services)
|
||||
SERVICES="$2"
|
||||
shift 2
|
||||
;;
|
||||
--dry-run)
|
||||
DRY_RUN=true
|
||||
shift
|
||||
;;
|
||||
-*)
|
||||
echo "❌ Unknown option: $1"
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
*)
|
||||
echo "❌ Unexpected argument: $1"
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Validate environment
|
||||
if [[ "$ENVIRONMENT" != "staging" && "$ENVIRONMENT" != "production" ]]; then
|
||||
echo "❌ Error: Environment must be 'staging' or 'production'"
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Validate version format (flexible semantic versioning)
|
||||
if [[ ! "$VERSION" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9\-]+)?$ ]]; then
|
||||
echo "❌ Error: Invalid version format. Expected format: v1.2.3 or v1.2.3-alpha"
|
||||
echo " Examples: v1.0.0, v2.1.3-beta, v1.0.0-rc1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Ensure version starts with 'v'
|
||||
if [[ ! "$VERSION" =~ ^v ]]; then
|
||||
VERSION="v$VERSION"
|
||||
fi
|
||||
|
||||
# Get script directory to find config files
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
CONFIG_FILE="$SCRIPT_DIR/configs/$ENVIRONMENT.conf"
|
||||
|
||||
# Check if config file exists
|
||||
if [[ ! -f "$CONFIG_FILE" ]]; then
|
||||
echo "❌ Error: Config file not found: $CONFIG_FILE"
|
||||
echo " Please create the config file with Scaleway credentials"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Load configuration
|
||||
echo "📋 Loading configuration from: $CONFIG_FILE"
|
||||
source "$CONFIG_FILE"
|
||||
|
||||
# Validate required config variables
|
||||
if [[ -z "$SCALEWAY_REGISTRY" || -z "$SCALEWAY_API_KEY" ]]; then
|
||||
echo "❌ Error: Missing required configuration in $CONFIG_FILE"
|
||||
echo " Required: SCALEWAY_REGISTRY, SCALEWAY_API_KEY"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Local registry configuration
|
||||
LOCAL_REGISTRY="registry.ask-eve-ai-local.com"
|
||||
ACCOUNT="josakola"
|
||||
|
||||
# Check if podman is available
|
||||
if ! command -v podman &> /dev/null; then
|
||||
echo "❌ Error: podman not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if yq is available
|
||||
if ! command -v yq &> /dev/null; then
|
||||
echo "❌ Error: yq not found (required for parsing compose file)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if compose file exists
|
||||
COMPOSE_FILE="../docker/compose_dev.yaml"
|
||||
if [[ ! -f "$COMPOSE_FILE" ]]; then
|
||||
echo "❌ Error: Compose file '$COMPOSE_FILE' not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🚀 EveAI Scaleway Push Script"
|
||||
echo "📦 Version: $VERSION"
|
||||
echo "🌍 Environment: $ENVIRONMENT"
|
||||
echo "🏪 Local Registry: $LOCAL_REGISTRY"
|
||||
echo "☁️ Scaleway Registry: $SCALEWAY_REGISTRY"
|
||||
echo "👤 Account: $ACCOUNT"
|
||||
|
||||
# Get services to process
|
||||
if [[ -n "$SERVICES" ]]; then
|
||||
# Convert comma-separated list to array
|
||||
IFS=',' read -ra SERVICE_ARRAY <<< "$SERVICES"
|
||||
else
|
||||
# Get all EveAI services (excluding nginx as per requirements)
|
||||
SERVICE_ARRAY=()
|
||||
while IFS= read -r line; do
|
||||
SERVICE_ARRAY+=("$line")
|
||||
done < <(yq e '.services | keys | .[]' "$COMPOSE_FILE" | grep -E '^eveai_')
|
||||
fi
|
||||
|
||||
echo "🔍 Services to process: ${SERVICE_ARRAY[*]}"
|
||||
|
||||
# Function to check if image exists locally
|
||||
check_local_image_exists() {
|
||||
local image_name="$1"
|
||||
if podman image exists "$image_name" 2>/dev/null; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to authenticate with Scaleway registry
|
||||
authenticate_scaleway() {
|
||||
echo "🔐 Authenticating with Scaleway registry..."
|
||||
|
||||
if [[ "$DRY_RUN" == true ]]; then
|
||||
echo " 🔍 [DRY RUN] Would authenticate with Scaleway registry"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Extract registry hostname from full registry URL
|
||||
REGISTRY_HOST=$(echo "$SCALEWAY_REGISTRY" | cut -d'/' -f1)
|
||||
|
||||
# Login to Scaleway registry using API key
|
||||
if ! echo "$SCALEWAY_API_KEY" | podman login --username nologin --password-stdin "$REGISTRY_HOST"; then
|
||||
echo " ❌ Failed to authenticate with Scaleway registry"
|
||||
echo " 💡 Check your API key in $CONFIG_FILE"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo " ✅ Successfully authenticated with Scaleway registry"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Authenticate with Scaleway
|
||||
if ! authenticate_scaleway; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Process each service
|
||||
PROCESSED_SERVICES=()
|
||||
FAILED_SERVICES=()
|
||||
|
||||
for SERVICE in "${SERVICE_ARRAY[@]}"; do
|
||||
echo ""
|
||||
echo "🔄 Processing service: $SERVICE"
|
||||
|
||||
# Check if service exists in compose file
|
||||
if ! yq e ".services.$SERVICE" "$COMPOSE_FILE" | grep -q "image:"; then
|
||||
echo "⚠️ Warning: Service '$SERVICE' not found in $COMPOSE_FILE, skipping"
|
||||
continue
|
||||
fi
|
||||
|
||||
# Construct image names
|
||||
LOCAL_VERSION_IMAGE="$LOCAL_REGISTRY/$ACCOUNT/$SERVICE:$VERSION"
|
||||
SCALEWAY_VERSION_IMAGE="$SCALEWAY_REGISTRY/$ACCOUNT/$SERVICE:$VERSION"
|
||||
|
||||
echo " 📥 Source: $LOCAL_VERSION_IMAGE"
|
||||
echo " 📤 Target: $SCALEWAY_VERSION_IMAGE"
|
||||
|
||||
if [[ "$DRY_RUN" == true ]]; then
|
||||
echo " 🔍 [DRY RUN] Would push $LOCAL_VERSION_IMAGE to $SCALEWAY_VERSION_IMAGE"
|
||||
PROCESSED_SERVICES+=("$SERVICE")
|
||||
continue
|
||||
fi
|
||||
|
||||
# Check if local version image exists
|
||||
if ! check_local_image_exists "$LOCAL_VERSION_IMAGE"; then
|
||||
echo " ❌ Local version image not found: $LOCAL_VERSION_IMAGE"
|
||||
echo " 💡 Run tag_registry_version.sh first to create version tags"
|
||||
FAILED_SERVICES+=("$SERVICE")
|
||||
continue
|
||||
fi
|
||||
|
||||
# Pull local version image to ensure we have it
|
||||
echo " 📥 Pulling local version image..."
|
||||
if ! podman pull "$LOCAL_VERSION_IMAGE"; then
|
||||
echo " ❌ Failed to pull $LOCAL_VERSION_IMAGE"
|
||||
FAILED_SERVICES+=("$SERVICE")
|
||||
continue
|
||||
fi
|
||||
|
||||
# Tag for Scaleway registry (direct push with same version tag)
|
||||
echo " 🏷️ Tagging for Scaleway registry..."
|
||||
if ! podman tag "$LOCAL_VERSION_IMAGE" "$SCALEWAY_VERSION_IMAGE"; then
|
||||
echo " ❌ Failed to tag $LOCAL_VERSION_IMAGE as $SCALEWAY_VERSION_IMAGE"
|
||||
FAILED_SERVICES+=("$SERVICE")
|
||||
continue
|
||||
fi
|
||||
|
||||
# Push to Scaleway registry
|
||||
echo " 📤 Pushing to Scaleway registry..."
|
||||
if ! podman push "$SCALEWAY_VERSION_IMAGE"; then
|
||||
echo " ❌ Failed to push $SCALEWAY_VERSION_IMAGE"
|
||||
FAILED_SERVICES+=("$SERVICE")
|
||||
continue
|
||||
fi
|
||||
|
||||
# Clean up local Scaleway tag
|
||||
echo " 🧹 Cleaning up local Scaleway tag..."
|
||||
podman rmi "$SCALEWAY_VERSION_IMAGE" 2>/dev/null || true
|
||||
|
||||
echo " ✅ Successfully pushed $SERVICE version $VERSION to Scaleway"
|
||||
PROCESSED_SERVICES+=("$SERVICE")
|
||||
done
|
||||
|
||||
# Summary
|
||||
echo ""
|
||||
echo "📊 Summary:"
|
||||
echo "✅ Successfully processed: ${#PROCESSED_SERVICES[@]} services"
|
||||
if [[ ${#PROCESSED_SERVICES[@]} -gt 0 ]]; then
|
||||
printf " - %s\n" "${PROCESSED_SERVICES[@]}"
|
||||
fi
|
||||
|
||||
if [[ ${#FAILED_SERVICES[@]} -gt 0 ]]; then
|
||||
echo "❌ Failed: ${#FAILED_SERVICES[@]} services"
|
||||
printf " - %s\n" "${FAILED_SERVICES[@]}"
|
||||
fi
|
||||
|
||||
if [[ "$DRY_RUN" == true ]]; then
|
||||
echo "🔍 This was a dry run - no actual changes were made"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
if [[ ${#FAILED_SERVICES[@]} -eq 0 ]]; then
|
||||
echo "🎉 All services successfully pushed to Scaleway $ENVIRONMENT!"
|
||||
echo "☁️ Images are available in Scaleway registry: $SCALEWAY_REGISTRY/$ACCOUNT/[service]:$VERSION"
|
||||
else
|
||||
echo "⚠️ Some services failed to process. Check the errors above."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🕐 Finished at $(date +"%d/%m/%Y %H:%M:%S")"
|
||||
203
scaleway/scaleway_env_switch.sh
Executable file
@@ -0,0 +1,203 @@
|
||||
#!/usr/bin/env zsh
|
||||
|
||||
# Function to display usage information
|
||||
usage() {
|
||||
echo "Usage: source $0 <environment>"
|
||||
echo " environment: The Scaleway environment to use (staging|production)"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " source $0 staging"
|
||||
echo " source $0 production"
|
||||
}
|
||||
|
||||
# Check if the script is sourced - improved for both bash and zsh
|
||||
is_sourced() {
|
||||
if [[ -n "$ZSH_VERSION" ]]; then
|
||||
# In zsh, check if we're in a sourced context
|
||||
[[ "$ZSH_EVAL_CONTEXT" =~ "(:file|:cmdsubst)" ]] || [[ "$0" != "$ZSH_ARGZERO" ]]
|
||||
else
|
||||
# In bash, compare BASH_SOURCE with $0
|
||||
[[ "${BASH_SOURCE[0]}" != "${0}" ]]
|
||||
fi
|
||||
}
|
||||
|
||||
if ! is_sourced; then
|
||||
echo "❌ Error: This script must be sourced, not executed directly."
|
||||
echo "Please run: source $0 <environment>"
|
||||
if [[ -n "$ZSH_VERSION" ]]; then
|
||||
return 1 2>/dev/null || exit 1
|
||||
else
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check if an environment is provided
|
||||
if [ $# -eq 0 ]; then
|
||||
usage
|
||||
return 1
|
||||
fi
|
||||
|
||||
ENVIRONMENT=$1
|
||||
|
||||
# Validate environment
|
||||
if [[ "$ENVIRONMENT" != "staging" && "$ENVIRONMENT" != "production" ]]; then
|
||||
echo "❌ Invalid environment: $ENVIRONMENT"
|
||||
usage
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Get script directory to find config files
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]:-$0}")" && pwd)"
|
||||
CONFIG_FILE="$SCRIPT_DIR/configs/$ENVIRONMENT.conf"
|
||||
|
||||
# Check if config file exists
|
||||
if [[ ! -f "$CONFIG_FILE" ]]; then
|
||||
echo "❌ Error: Config file not found: $CONFIG_FILE"
|
||||
echo " Please create the config file with Scaleway credentials"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Load configuration
|
||||
echo "📋 Loading Scaleway $ENVIRONMENT configuration..."
|
||||
source "$CONFIG_FILE"
|
||||
|
||||
# Validate required config variables
|
||||
if [[ -z "$SCALEWAY_REGISTRY" || -z "$SCALEWAY_API_KEY" ]]; then
|
||||
echo "❌ Error: Missing required configuration in $CONFIG_FILE"
|
||||
echo " Required: SCALEWAY_REGISTRY, SCALEWAY_API_KEY"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check if kubectl is available for K8s context switching
|
||||
KUBECTL_AVAILABLE=false
|
||||
if command -v kubectl &> /dev/null; then
|
||||
KUBECTL_AVAILABLE=true
|
||||
else
|
||||
echo "⚠️ Warning: kubectl not found - K8s context switching will be skipped"
|
||||
fi
|
||||
|
||||
echo "☁️ Scaleway Environment Switch"
|
||||
echo "🌍 Environment: $ENVIRONMENT"
|
||||
echo "🏪 Registry: $SCALEWAY_REGISTRY"
|
||||
echo "🌐 Region: ${SCALEWAY_REGION:-fr-par}"
|
||||
|
||||
# Set environment variables
|
||||
export SCALEWAY_ENVIRONMENT=$ENVIRONMENT
|
||||
export SCALEWAY_REGISTRY=$SCALEWAY_REGISTRY
|
||||
export SCALEWAY_API_KEY=$SCALEWAY_API_KEY
|
||||
export SCALEWAY_REGION=${SCALEWAY_REGION:-fr-par}
|
||||
export SCALEWAY_PROJECT_ID=${SCALEWAY_PROJECT_ID:-}
|
||||
|
||||
# Handle kubectl context switching if available
|
||||
if [[ "$KUBECTL_AVAILABLE" == true && -n "$K8S_CONTEXT" ]]; then
|
||||
echo "🔄 Switching kubectl context..."
|
||||
|
||||
# Check if the context exists
|
||||
if kubectl config get-contexts "$K8S_CONTEXT" &>/dev/null; then
|
||||
if kubectl config use-context "$K8S_CONTEXT" &>/dev/null; then
|
||||
echo "✅ Switched to kubectl context: $K8S_CONTEXT"
|
||||
export KUBECTL_CONTEXT=$K8S_CONTEXT
|
||||
else
|
||||
echo "⚠️ Warning: Failed to switch to kubectl context: $K8S_CONTEXT"
|
||||
fi
|
||||
else
|
||||
echo "⚠️ Warning: kubectl context '$K8S_CONTEXT' does not exist"
|
||||
echo " 💡 You may need to configure this context manually"
|
||||
fi
|
||||
elif [[ -n "$K8S_CONTEXT" ]]; then
|
||||
echo "⚠️ kubectl not available - context switching skipped"
|
||||
export KUBECTL_CONTEXT=$K8S_CONTEXT
|
||||
fi
|
||||
|
||||
# Define helper functions for Scaleway operations
|
||||
scaleway_login() {
|
||||
echo "🔐 Logging into Scaleway registry..."
|
||||
|
||||
# Extract registry hostname from full registry URL
|
||||
REGISTRY_HOST=$(echo "$SCALEWAY_REGISTRY" | cut -d'/' -f1)
|
||||
|
||||
# Login to Scaleway registry using API key
|
||||
if echo "$SCALEWAY_API_KEY" | podman login --username nologin --password-stdin "$REGISTRY_HOST"; then
|
||||
echo "✅ Successfully logged into Scaleway registry"
|
||||
else
|
||||
echo "❌ Failed to login to Scaleway registry"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
scaleway_logout() {
|
||||
echo "🔓 Logging out of Scaleway registry..."
|
||||
|
||||
# Extract registry hostname from full registry URL
|
||||
REGISTRY_HOST=$(echo "$SCALEWAY_REGISTRY" | cut -d'/' -f1)
|
||||
|
||||
if podman logout "$REGISTRY_HOST" 2>/dev/null; then
|
||||
echo "✅ Successfully logged out of Scaleway registry"
|
||||
else
|
||||
echo "⚠️ Warning: Could not logout of Scaleway registry (may not have been logged in)"
|
||||
fi
|
||||
}
|
||||
|
||||
scaleway_info() {
|
||||
echo "📋 Current Scaleway Configuration:"
|
||||
echo " 🌍 Environment: $SCALEWAY_ENVIRONMENT"
|
||||
echo " 🏪 Registry: $SCALEWAY_REGISTRY"
|
||||
echo " 🌐 Region: $SCALEWAY_REGION"
|
||||
if [[ -n "$SCALEWAY_PROJECT_ID" ]]; then
|
||||
echo " 📁 Project ID: $SCALEWAY_PROJECT_ID"
|
||||
fi
|
||||
if [[ -n "$KUBECTL_CONTEXT" ]]; then
|
||||
echo " ⚙️ K8s Context: $KUBECTL_CONTEXT"
|
||||
fi
|
||||
if [[ -n "$K8S_CLUSTER_NAME" ]]; then
|
||||
echo " 🏗️ Cluster: $K8S_CLUSTER_NAME"
|
||||
fi
|
||||
}
|
||||
|
||||
scaleway_push() {
|
||||
local version="$1"
|
||||
if [[ -z "$version" ]]; then
|
||||
echo "❌ Error: Version is required"
|
||||
echo "Usage: scaleway_push <version> [services]"
|
||||
return 1
|
||||
fi
|
||||
|
||||
shift
|
||||
local services="$*"
|
||||
|
||||
echo "🚀 Pushing version $version to Scaleway $SCALEWAY_ENVIRONMENT..."
|
||||
|
||||
if [[ -n "$services" ]]; then
|
||||
"$SCRIPT_DIR/push_to_scaleway.sh" "$version" "$SCALEWAY_ENVIRONMENT" --services "$services"
|
||||
else
|
||||
"$SCRIPT_DIR/push_to_scaleway.sh" "$version" "$SCALEWAY_ENVIRONMENT"
|
||||
fi
|
||||
}
|
||||
|
||||
# Export functions - handle both bash and zsh
|
||||
if [[ -n "$ZSH_VERSION" ]]; then
|
||||
# In zsh, functions are automatically available in subshells
|
||||
# But we can make them available globally with typeset
|
||||
typeset -f scaleway_login scaleway_logout scaleway_info scaleway_push > /dev/null
|
||||
else
|
||||
# Bash style export
|
||||
export -f scaleway_login scaleway_logout scaleway_info scaleway_push
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "✅ Scaleway environment switched to $ENVIRONMENT"
|
||||
echo "☁️ Registry: $SCALEWAY_REGISTRY"
|
||||
if [[ -n "$KUBECTL_CONTEXT" ]]; then
|
||||
echo "⚙️ kubectl context: $KUBECTL_CONTEXT"
|
||||
fi
|
||||
echo ""
|
||||
echo "Available commands:"
|
||||
echo " scaleway_login - Login to Scaleway registry"
|
||||
echo " scaleway_logout - Logout from Scaleway registry"
|
||||
echo " scaleway_info - Show current configuration"
|
||||
echo " scaleway_push - Push version to current environment"
|
||||
echo ""
|
||||
echo "💡 Example usage:"
|
||||
echo " scaleway_login"
|
||||
echo " scaleway_push v1.2.3-alpha"
|
||||
echo " scaleway_push v2.0.0 eveai_api,eveai_workers"
|
||||
243
scaleway/scaleway_staging_setup.md
Normal file
@@ -0,0 +1,243 @@
### Revised Analysis and Plan of Action - Final Version

Thanks for the clear answers! This gives me all the information I need to draw up a definitive plan of action.

### Revised Situation Analysis

#### **Persistent Storage Requirements**
You are right - for the **internal Prometheus** we do indeed need persistent storage for:
- **Prometheus data**: metrics history (7-14 days retention)
- **Pushgateway data**: temporary metrics buffer
- **Application logs**: via Scaleway Logs (managed)

#### **Logging Strategy - Clear**
- **Application logs**: Scaleway Logs (managed, 7-14 days)
- **Business event logs**: PostgreSQL (under your control, billing)
- **Audit logs**: not explicit, but DB tracking of changes already exists

#### **Infrastructure Status**
- **Staging cluster**: operational but empty
- **DNS access**: available via cPanel
- **Secrets**: fully configured in Scaleway Secret Manager

### Revised Plan of Action

#### **Phase 1: Infrastructure & Storage Setup (Week 1)**

1. **Persistent Storage Configuration**
   ```yaml
   # Scaleway Block Storage volumes
   - prometheus-data: 20GB (metrics retention)
   - pushgateway-data: 5GB (temporary buffer)
   - application-logs: 10GB (7-day buffer for Scaleway Logs)
   ```

2. **DNS & SSL Setup**
   - Configure `evie-staging.askeveai.com` in cPanel
   - Point it at the K8s LoadBalancer IP
   - Set up Let's Encrypt SSL certificates

3. **Scaleway Logs Setup**
   ```yaml
   # Fluent Bit DaemonSet configuration
   # Ships directly to Scaleway Logs
   # 7-day retention policy
   ```

4. **External Secrets Operator**
   - Install ESO in the K8s cluster
   - Configure the Scaleway Secret Manager integration (sketched below)
   - Test the secrets mapping
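   A minimal sketch of that wiring, assuming ESO's Scaleway provider; the store name, the bootstrap credentials secret, and the remote secret name are assumptions, not the actual configuration:

   ```yaml
   # Sketch only - names, namespaces and key layout are assumptions
   apiVersion: external-secrets.io/v1beta1
   kind: SecretStore
   metadata:
     name: scaleway-secret-manager
     namespace: eveai-staging
   spec:
     provider:
       scaleway:
         region: fr-par
         projectId: "ad7d2ed9-252b-4b2a-9f4c-daca3edc4c4b"   # from staging.conf
         accessKey:
           secretRef:
             name: scw-credentials        # hypothetical bootstrap secret
             key: access-key
         secretKey:
           secretRef:
             name: scw-credentials
             key: secret-key
   ---
   apiVersion: external-secrets.io/v1beta1
   kind: ExternalSecret
   metadata:
     name: eveai-app-secrets
     namespace: eveai-staging
   spec:
     refreshInterval: 1h
     secretStoreRef:
       name: scaleway-secret-manager
       kind: SecretStore
     target:
       name: eveai-app-secrets            # consumed via envFrom in the deployments
     dataFrom:
       - extract:
           key: "name:eveai-app-secrets"  # Scaleway secret referenced by name
   ```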

#### **Phase 2: Monitoring Stack Deployment (Week 1-2)**

5. **Prometheus Stack with Persistent Storage**
   ```yaml
   # Prometheus StatefulSet (volumeClaimTemplates require a StatefulSet)
   spec:
     volumeClaimTemplates:
       - metadata:
           name: prometheus-data
         spec:
           accessModes: ["ReadWriteOnce"]
           resources:
             requests:
               storage: 20Gi

   # Pushgateway Deployment
   spec:
     volumes:
       - name: pushgateway-data
         persistentVolumeClaim:
           claimName: pushgateway-pvc
   ```

6. **Business Metrics Integration**
   - Keep the current `business_event.py` logic
   - The Pushgateway remains available on K8s
   - Configure Prometheus scraping of the Pushgateway (scrape config sketched below)
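   A minimal scrape-config sketch for picking those pushed metrics up; the service DNS name assumes the Pushgateway runs as `pushgateway` in the `eveai-staging` namespace on the default port 9091:

   ```yaml
   # prometheus.yml fragment - service name and namespace are assumptions
   scrape_configs:
     - job_name: pushgateway
       honor_labels: true   # keep the job/instance labels pushed by business_event.py
       static_configs:
         - targets: ["pushgateway.eveai-staging.svc.cluster.local:9091"]
   ```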

7. **Scaleway Cockpit Remote Write**
   ```yaml
   # Prometheus configuration
   remote_write:
     - url: "https://metrics.cockpit.fr-par.scw.cloud/api/v1/push"
       headers:
         X-Token: "{{ scaleway_metrics_token }}"
   ```

#### **Phase 3: Application Services Deployment (Week 2)**

8. **Core Services with Secrets Integration**
   ```yaml
   # Deployment template for all 8 services
   spec:
     template:
       spec:
         containers:
           - name: eveai-service
             envFrom:
               - secretRef:
                   name: eveai-app-secrets
               - secretRef:
                   name: eveai-postgresql-secrets
               # etc.
   ```

9. **Ingress Controller & SSL**
   ```yaml
   # NGINX Ingress with SSL
   apiVersion: networking.k8s.io/v1
   kind: Ingress
   metadata:
     name: eveai-staging-ingress
     annotations:
       cert-manager.io/cluster-issuer: "letsencrypt-prod"
   spec:
     tls:
       - hosts:
           - evie-staging.askeveai.com
         secretName: eveai-staging-tls
   ```

10. **Service Dependencies & Health Checks**
    - Init containers for database migrations (sketched below)
    - Readiness/liveness probes for all services
    - Service discovery configuration
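    A minimal sketch of that pattern for one service; the image tag, the migration command, and the `/health` path are assumptions, not the actual service contract:

    ```yaml
    # Sketch only - image, command and probe path are assumptions
    spec:
      template:
        spec:
          initContainers:
            - name: db-migrate
              image: rg.fr-par.scw.cloud/eveai-staging/josakola/eveai_api:v1.0.0  # hypothetical tag
              command: ["python", "manage.py", "migrate"]                         # hypothetical command
              envFrom:
                - secretRef:
                    name: eveai-postgresql-secrets
          containers:
            - name: eveai-api
              readinessProbe:
                httpGet:
                  path: /health    # assumed health endpoint
                  port: 8000
                initialDelaySeconds: 5
                periodSeconds: 10
              livenessProbe:
                httpGet:
                  path: /health
                  port: 8000
                initialDelaySeconds: 15
                periodSeconds: 20
    ```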

#### **Phase 4: Logging & Observability (Week 2-3)**

11. **Scaleway Logs Integration** (output configuration sketched below)
    ```yaml
    # Fluent Bit DaemonSet
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: fluent-bit
    spec:
      template:
        spec:
          containers:
            - name: fluent-bit
              image: fluent/fluent-bit:latest
              env:
                - name: SCALEWAY_LOGS_TOKEN
                  valueFrom:
                    secretKeyRef:
                      name: scaleway-logs-secret
                      key: token
    ```
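    How that token gets used depends on the Fluent Bit output; a minimal sketch assuming Cockpit's Loki-compatible push endpoint (the hostname, port, and authentication mechanism are assumptions to verify against the Scaleway Cockpit documentation):

    ```yaml
    # Sketch only - endpoint, auth and labels are assumptions
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: fluent-bit-config
    data:
      fluent-bit.conf: |
        [INPUT]
            Name    tail
            Path    /var/log/containers/*.log
            Tag     kube.*

        [OUTPUT]
            Name         loki
            Match        kube.*
            Host         logs.cockpit.fr-par.scw.cloud
            Port         443
            Tls          On
            Http_User    api_key                     # check Cockpit docs: basic auth vs. X-Token header
            Http_Passwd  ${SCALEWAY_LOGS_TOKEN}
            Labels       job=fluent-bit, cluster=eveai-staging
    ```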

12. **Log Routing Configuration**
    - Application logs → Scaleway Logs (7-day retention)
    - Business events → PostgreSQL (under your control)
    - System logs → Scaleway Logs
    - Error logs → Scaleway Logs + alerting

#### **Phase 5: Testing & Go-Live (Week 3-4)**

13. **Deployment Automation**
    - Update `push_to_scaleway.sh` for K8s deployment
    - Rolling update configuration (sketched below)
    - Rollback procedures
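    A minimal sketch of the rolling-update and rollback side of the Deployments; the surge/unavailable numbers are assumptions to tune per service:

    ```yaml
    # Deployment strategy sketch - the numbers are assumptions
    spec:
      revisionHistoryLimit: 5     # keeps old ReplicaSets around for `kubectl rollout undo`
      strategy:
        type: RollingUpdate
        rollingUpdate:
          maxSurge: 1             # one extra pod during an update
          maxUnavailable: 0       # never drop below the desired replica count
    ```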

14. **Monitoring Dashboards**
    - Scaleway Cockpit dashboards for infrastructure
    - Custom business metrics visualisation
    - Alerting configuration (rule sketch below)
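    A minimal Prometheus alerting-rule sketch; the metric name `business_events_total` and the threshold are assumptions, the real metric names come from `business_event.py`:

    ```yaml
    # Alerting rule sketch - metric name and threshold are assumptions
    groups:
      - name: eveai-staging
        rules:
          - alert: BusinessEventsStalled
            expr: increase(business_events_total[30m]) == 0
            for: 30m
            labels:
              severity: warning
            annotations:
              summary: "No business events pushed to the Pushgateway in the last 30 minutes"
    ```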

15. **Load Testing & Performance**
    - Staging environment load testing
    - Resource limits fine-tuning
    - Auto-scaling configuration (HPA, sketched below)
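    A minimal HPA sketch for one of the services; the target Deployment name, replica range, and CPU threshold are assumptions:

    ```yaml
    # HPA sketch - target name and thresholds are assumptions
    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    metadata:
      name: eveai-api-hpa
      namespace: eveai-staging
    spec:
      scaleTargetRef:
        apiVersion: apps/v1
        kind: Deployment
        name: eveai-api          # hypothetical deployment name
      minReplicas: 1
      maxReplicas: 3
      metrics:
        - type: Resource
          resource:
            name: cpu
            target:
              type: Utilization
              averageUtilization: 70
    ```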

### Specific Storage Configuration

#### **Persistent Volumes Planning**

```yaml
# Prometheus Storage
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: prometheus-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
  storageClassName: scw-bssd  # Scaleway Block Storage

---
# Pushgateway Storage
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pushgateway-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: scw-bssd
```

#### **Monitoring Stack Resource Allocation**

For the staging environment (see the requests/limits sketch below):
- **Prometheus**: 2GB RAM, 1 CPU, 20GB storage (14 days of metrics)
- **Pushgateway**: 512MB RAM, 0.5 CPU, 5GB storage
- **Fluent Bit**: 256MB RAM, 0.2 CPU per node
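Expressed as container resources, a sketch of those numbers (the requests are assumptions; the limits follow the figures above):

```yaml
# Resource sketch - requests are assumptions, limits follow the allocation above
prometheus:
  resources:
    requests: { cpu: 500m, memory: 1Gi }
    limits: { cpu: "1", memory: 2Gi }
pushgateway:
  resources:
    requests: { cpu: 250m, memory: 256Mi }
    limits: { cpu: 500m, memory: 512Mi }
fluent-bit:
  resources:
    requests: { cpu: 100m, memory: 128Mi }
    limits: { cpu: 200m, memory: 256Mi }
```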

### Answers to Your Specific Questions

#### **1. Persistent Storage for Prometheus**
Yes, absolutely needed. Without persistent storage you lose all metrics on every pod restart. We will configure Scaleway Block Storage volumes for:
- Prometheus metrics data (20GB, 14-day retention; retention flags sketched below)
- Pushgateway buffer (5GB)
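The 14-day window is enforced by Prometheus's startup flags rather than by the volume size alone; a minimal sketch of the container args (the size cap is an assumption, chosen to leave headroom on the 20GB volume):

```yaml
# Prometheus container args sketch - the size cap is an assumption
args:
  - --config.file=/etc/prometheus/prometheus.yml
  - --storage.tsdb.path=/prometheus
  - --storage.tsdb.retention.time=14d
  - --storage.tsdb.retention.size=19GB
```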

#### **2. Audit Logs Clarification**
By "audit logs" I meant system-level logs (who accessed which resources, API calls, etc.). Your DB tracking of changes is fine and stays as it is.

#### **3. Scaleway Logs vs Kubernetes Native**
Scaleway Logs is indeed the best choice because:
- Fully managed (no maintenance)
- Automatic retention management
- Integrated with Cockpit
- Cost-effective for staging

#### **4. Business Event Logs**
These stay in PostgreSQL as they are now, because:
- Needed for billing (critical data)
- Retention stays under your control
- Structured data for the business logic

### Next Concrete Steps

1. **DNS Setup**: configure `evie-staging.askeveai.com` in cPanel
2. **Storage Classes**: verify the Scaleway Block Storage classes in K8s
3. **External Secrets**: install ESO and test the secrets mapping
4. **Monitoring Stack**: deploy Prometheus + Pushgateway with persistent storage

Do you want us to start with step 1 (DNS setup), or do you prefer a different order?