- Cluster setup works
- Starting redis and minio works - Busy getting the actual apps to start ... does not work yet.
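A minimal smoke test after running the setup script, to check the claim above that the cluster plus redis and minio come up (illustrative sketch only; the redis/minio name filter is an assumption, not taken from this commit):

    # Hypothetical post-setup check; redis/minio pod names are assumed, not defined in this commit.
    kubectl get nodes
    kubectl get pods -n ingress-nginx
    kubectl get pods -A | grep -E 'redis|minio'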
@@ -67,7 +67,6 @@ create_host_directories() {

    directories=(
        "$BASE_DIR/minio"
        "$BASE_DIR/redis"
        "$BASE_DIR/logs"
        "$BASE_DIR/prometheus"
        "$BASE_DIR/grafana"
@@ -107,7 +106,7 @@ create_cluster() {

    KIND_CONFIG="kind-dev-cluster.yaml"
    if [ ! -f "${KIND_CONFIG}" ]; then
        print_error "Config '${KIND_CONFIG}' not found in $(pwd)"
        exit 1
    fi
@@ -146,30 +145,6 @@ create_cluster() {
    print_success "Kind cluster created successfully"
}

# Configure container resource limits to prevent CRI issues
configure_container_limits() {
    print_status "Configuring container resource limits..."

    # Configure file descriptor and inotify limits to prevent CRI plugin failures
    podman exec "${CLUSTER_NAME}-control-plane" sh -c '
        echo "fs.inotify.max_user_instances = 1024" >> /etc/sysctl.conf
        echo "fs.inotify.max_user_watches = 524288" >> /etc/sysctl.conf
        echo "fs.file-max = 2097152" >> /etc/sysctl.conf
        sysctl -p
    '

    # Restart containerd to apply new limits
    print_status "Restarting containerd with new limits..."
    podman exec "${CLUSTER_NAME}-control-plane" systemctl restart containerd

    # Wait for containerd to stabilize
    sleep 10

    # Restart kubelet to ensure proper CRI communication
    podman exec "${CLUSTER_NAME}-control-plane" systemctl restart kubelet

    print_success "Container limits configured and services restarted"
}

# Verify CRI status and functionality
verify_cri_status() {
@@ -233,8 +208,23 @@ install_ingress_controller() {
    if [ $? -eq 0 ]; then
        print_success "NGINX Ingress Controller installed and ready"
    else
        print_warning "Ingress Controller not ready, trying to label node..."
        # Label the node for ingress (fallback for scheduling issues)
        kubectl label node eveai-dev-cluster-control-plane ingress-ready=true --overwrite

        # Wait again for Ingress Controller to be ready
        print_status "Waiting for Ingress Controller after node labeling..."
        kubectl wait --namespace ingress-nginx \
            --for=condition=ready pod \
            --selector=app.kubernetes.io/component=controller \
            --timeout=300s

        if [ $? -eq 0 ]; then
            print_success "NGINX Ingress Controller ready after node labeling"
        else
            print_error "Failed to install or start Ingress Controller even after node labeling"
            exit 1
        fi
    fi

    # Verify Ingress Controller status
@@ -247,16 +237,38 @@ install_ingress_controller() {
apply_manifests() {
    print_status "Applying Kubernetes manifests..."

    # Apply base manifests in correct order (namespace.yaml handles namespace creation)
    manifests=(
        "namespace.yaml"
        "persistent-volumes.yaml"
        "config-secrets.yaml"
        "network-policies.yaml"
    )

    for manifest in "${manifests[@]}"; do
        if [ -f "$manifest" ]; then
            print_status "Applying $manifest..."

            # Apply with retry logic for race condition handling
            local max_attempts=3
            local attempt=1
            local success=false

            while [ $attempt -le $max_attempts ] && [ "$success" = false ]; do
                if kubectl apply -f "$manifest"; then
                    print_success "Successfully applied: $manifest"
                    success=true
                else
                    if [ $attempt -lt $max_attempts ]; then
                        print_warning "Attempt $attempt failed for $manifest, retrying in 3 seconds..."
                        sleep 3
                        attempt=$((attempt + 1))
                    else
                        print_error "Failed to apply $manifest after $max_attempts attempts"
                        return 1
                    fi
                fi
            done
        else
            print_warning "Manifest $manifest not found, skipping..."
        fi
@@ -265,6 +277,43 @@ apply_manifests() {
    print_success "Base manifests applied successfully"
}

# Configure registry certificates and containerd
configure_registry_certificates() {
    print_status "Configuring registry certificates and containerd..."

    # Update CA certificates in the cluster
    print_status "Updating CA certificates..."
    kubectl debug node/eveai-dev-cluster-control-plane -it --image=busybox -- sh -c "
        chroot /host update-ca-certificates 2>/dev/null || true
    " 2>/dev/null || print_warning "Certificate update may have failed"

    # Create containerd registry configuration directory
    print_status "Creating containerd registry configuration..."
    kubectl debug node/eveai-dev-cluster-control-plane -it --image=busybox -- sh -c "
        chroot /host mkdir -p /etc/containerd/certs.d/registry.ask-eve-ai-local.com
    " 2>/dev/null || print_warning "Failed to create containerd config directory"

    # Configure registry hosts.toml
    print_status "Configuring registry hosts.toml..."
    kubectl debug node/eveai-dev-cluster-control-plane -it --image=busybox -- sh -c "
        chroot /host sh -c 'cat > /etc/containerd/certs.d/registry.ask-eve-ai-local.com/hosts.toml << EOF
server = \"https://registry.ask-eve-ai-local.com\"

[host.\"https://registry.ask-eve-ai-local.com\"]
  capabilities = [\"pull\", \"resolve\"]
  ca = [\"/usr/local/share/ca-certificates/mkcert-ca.crt\"]
EOF'
    " 2>/dev/null || print_warning "Failed to create hosts.toml"

    # Restart containerd to apply configuration
    print_status "Restarting containerd..."
    kubectl debug node/eveai-dev-cluster-control-plane -it --image=busybox -- sh -c "
        chroot /host systemctl restart containerd
    " 2>/dev/null || print_warning "Failed to restart containerd"

    print_success "Registry certificates and containerd configured"
}

# Verify cluster status
verify_cluster() {
    print_status "Verifying cluster status..."
@@ -300,10 +349,10 @@ main() {
    check_prerequisites
    create_host_directories
    create_cluster
    configure_container_limits
    verify_cri_status
    install_ingress_controller
    apply_manifests
    configure_registry_certificates
    verify_cluster

    echo ""