- Functional control plane

Author: Josako
Date: 2025-08-18 11:44:23 +02:00
parent 066f579294
commit 84a9334c80
17 changed files with 3619 additions and 55 deletions


@@ -6,6 +6,8 @@ set -e
echo "🚀 Setting up EveAI Dev Kind Cluster..."
CLUSTER_NAME="eveai-dev-cluster"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
@@ -82,7 +84,7 @@ create_host_directories() {
done
# Set proper permissions
-chmod -R 755 "$BASE_DIR"
+# chmod -R 755 "$BASE_DIR"
print_success "Host directories created and configured"
}
@@ -133,13 +135,114 @@ create_cluster() {
kubectl wait --for=condition=Ready nodes --all --timeout=300s
# Update CA certificates in Kind node
print_status "Updating CA certificates in cluster..."
-docker exec eveai-dev-cluster-control-plane update-ca-certificates
-docker exec eveai-dev-cluster-control-plane systemctl restart containerd
+if command -v podman &> /dev/null; then
+podman exec eveai-dev-cluster-control-plane update-ca-certificates
+podman exec eveai-dev-cluster-control-plane systemctl restart containerd
+else
+docker exec eveai-dev-cluster-control-plane update-ca-certificates
+docker exec eveai-dev-cluster-control-plane systemctl restart containerd
+fi
print_success "Kind cluster created successfully"
}
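# Optional follow-up check for the containerd restart above. A minimal sketch
# that assumes podman, mirroring the podman/docker fallback in create_cluster;
# swap in docker if that is your engine.
verify_containerd_health() {
if podman exec "${CLUSTER_NAME}-control-plane" systemctl is-active --quiet containerd; then
print_success "containerd is active"
else
print_error "containerd is not active after restart"
return 1
fi
}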
# Configure container resource limits to prevent CRI issues
configure_container_limits() {
print_status "Configuring container resource limits..."
# Configure file descriptor and inotify limits to prevent CRI plugin failures
podman exec "${CLUSTER_NAME}-control-plane" sh -c '
echo "fs.inotify.max_user_instances = 1024" >> /etc/sysctl.conf
echo "fs.inotify.max_user_watches = 524288" >> /etc/sysctl.conf
echo "fs.file-max = 2097152" >> /etc/sysctl.conf
sysctl -p
'
# Restart containerd to apply new limits
print_status "Restarting containerd with new limits..."
podman exec "${CLUSTER_NAME}-control-plane" systemctl restart containerd
# Wait for containerd to stabilize
sleep 10
# Restart kubelet to ensure proper CRI communication
podman exec "${CLUSTER_NAME}-control-plane" systemctl restart kubelet
print_success "Container limits configured and services restarted"
}
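# Optional sanity check for the limits set above; a sketch assuming podman.
# Prints the effective values so a silently failed sysctl is easy to spot.
verify_container_limits() {
podman exec "${CLUSTER_NAME}-control-plane" sysctl \
fs.inotify.max_user_instances fs.inotify.max_user_watches fs.file-max
}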
# Verify CRI status and functionality
verify_cri_status() {
print_status "Verifying CRI status..."
# Wait for services to stabilize
sleep 15
# Test CRI connectivity
if podman exec "${CLUSTER_NAME}-control-plane" crictl version &>/dev/null; then
print_success "CRI is functional"
# Show CRI version info
print_status "CRI version information:"
podman exec "${CLUSTER_NAME}-control-plane" crictl version
else
print_error "CRI is not responding - checking containerd logs"
podman exec "${CLUSTER_NAME}-control-plane" journalctl -u containerd --no-pager -n 20
print_error "Checking kubelet logs"
podman exec "${CLUSTER_NAME}-control-plane" journalctl -u kubelet --no-pager -n 10
return 1
fi
# Verify node readiness
print_status "Waiting for node to become Ready..."
local max_attempts=30
local attempt=0
while [ $attempt -lt $max_attempts ]; do
# Match 'Ready' as a whole word so a 'NotReady' node does not count as ready
if kubectl get nodes --no-headers | grep -qw "Ready"; then
print_success "Node is Ready"
return 0
fi
attempt=$((attempt + 1))
print_status "Attempt $attempt/$max_attempts - waiting for node readiness..."
sleep 10
done
print_error "Node failed to become Ready within timeout"
kubectl get nodes -o wide
return 1
}
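# Optional deeper CRI probe; a sketch assuming crictl ships in the Kind node
# image (it does in stock kindest/node images). Lists pod sandboxes and all
# containers so CRI problems surface beyond the bare "crictl version" check.
inspect_cri_state() {
podman exec "${CLUSTER_NAME}-control-plane" crictl pods
podman exec "${CLUSTER_NAME}-control-plane" crictl ps -a
}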
# Install Ingress Controller
install_ingress_controller() {
print_status "Installing NGINX Ingress Controller..."
# Install NGINX Ingress Controller for Kind
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.1/deploy/static/provider/kind/deploy.yaml
# Wait for Ingress Controller to be ready
print_status "Waiting for Ingress Controller to be ready..."
# Check the wait result directly: with 'set -e' a separate $? test would never run on failure
if kubectl wait --namespace ingress-nginx \
--for=condition=ready pod \
--selector=app.kubernetes.io/component=controller \
--timeout=300s; then
print_success "NGINX Ingress Controller installed and ready"
else
print_error "Failed to install or start Ingress Controller"
exit 1
fi
# Verify Ingress Controller status
print_status "Ingress Controller status:"
kubectl get pods -n ingress-nginx
kubectl get services -n ingress-nginx
}
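# Optional smoke test for the Ingress path; a sketch that assumes the host
# port mapping (3080) and hostname from the access URLs printed in main().
smoke_test_ingress() {
local code
code=$(curl -s -o /dev/null -w "%{http_code}" "http://minty.ask-eve-ai-local.com:3080/" || true)
print_status "Ingress responded with HTTP status: ${code:-unreachable}"
}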
# Apply Kubernetes manifests
apply_manifests() {
print_status "Applying Kubernetes manifests..."
@@ -197,6 +300,9 @@ main() {
check_prerequisites
create_host_directories
create_cluster
configure_container_limits
verify_cri_status
install_ingress_controller
apply_manifests
verify_cluster
@@ -206,22 +312,20 @@ main() {
echo "=================================================="
echo ""
echo "📋 Next steps:"
echo "1. Deploy your application services using the service manifests"
echo "2. Configure DNS entries for local development"
echo "3. Access services via the mapped ports (3000-3999 range)"
echo "1. Deploy your application services using: ./deploy-all-services.sh"
echo "2. Access services via Ingress: http://minty.ask-eve-ai-local.com:3080"
echo ""
echo "🔧 Useful commands:"
echo " kubectl config current-context # Verify you're using the right cluster"
echo " kubectl get all -n eveai-dev # Check all resources in dev namespace"
echo " kubectl get ingress -n eveai-dev # Check Ingress resources"
echo " kind delete cluster --name eveai-dev-cluster # Delete cluster when done"
echo ""
echo "📊 Port mappings:"
echo " - Nginx: http://minty.ask-eve-ai-local.com:3080"
echo " - EveAI App: http://minty.ask-eve-ai-local.com:3001"
echo " - EveAI API: http://minty.ask-eve-ai-local.com:3003"
echo " - Chat Client: http://minty.ask-eve-ai-local.com:3004"
echo " - MinIO Console: http://minty.ask-eve-ai-local.com:3009"
echo " - Grafana: http://minty.ask-eve-ai-local.com:3012"
echo "📊 Service Access (via Ingress):"
echo " - Main App: http://minty.ask-eve-ai-local.com:3080/admin/"
echo " - API: http://minty.ask-eve-ai-local.com:3080/api/"
echo " - Chat Client: http://minty.ask-eve-ai-local.com:3080/chat-client/"
echo " - Static Files: http://minty.ask-eve-ai-local.com:3080/static/"
}
# Run main function
main "$@"