#!/bin/bash

# Kubernetes Core Functions
# File: k8s-functions.sh

# Deploy a service group
#
# Applies every YAML manifest registered for the group with kubectl, then
# waits for the group's services to become ready.
#
# Globals:   K8S_CONFIG_DIR (read) - directory containing the YAML manifests
# Arguments: $1 - service group name
# Returns:   0 if every manifest applied successfully, 1 otherwise
deploy_service_group() {
    local group=$1

    log_operation "INFO" "Deploying service group: $group"

    if [[ -z "$K8S_CONFIG_DIR" ]]; then
        log_operation "ERROR" "K8S_CONFIG_DIR not set"
        return 1
    fi

    # Get YAML files for the group. Checking the assignment directly avoids
    # the fragile separated "$?" test (ShellCheck SC2181).
    local yaml_files
    if ! yaml_files=$(get_yaml_files_for_group "$group"); then
        log_operation "ERROR" "Failed to get YAML files for group: $group"
        return 1
    fi

    # Check dependencies first (non-fatal by design)
    if ! check_group_dependencies "$group"; then
        log_operation "WARNING" "Some dependencies not satisfied, but proceeding with deployment"
    fi

    # Split the whitespace-separated file list into an array; read -d '' -a
    # consumes all input (including newlines) without glob-expanding names.
    local -a yaml_list=()
    read -r -d '' -a yaml_list <<< "$yaml_files" || true

    # Deploy each YAML file
    local success=true
    local yaml_file
    for yaml_file in "${yaml_list[@]}"; do
        local full_path="$K8S_CONFIG_DIR/$yaml_file"

        if [[ ! -f "$full_path" ]]; then
            log_operation "ERROR" "YAML file not found: $full_path"
            success=false
            continue
        fi

        log_operation "INFO" "Applying YAML file: $yaml_file"
        log_kubectl_command "kubectl apply -f $full_path"

        if kubectl apply -f "$full_path"; then
            log_operation "SUCCESS" "Successfully applied: $yaml_file"
        else
            log_operation "ERROR" "Failed to apply: $yaml_file"
            success=false
        fi
    done

    if [[ "$success" == "true" ]]; then
        log_operation "SUCCESS" "Service group '$group' deployed successfully"

        # Wait for services to be ready
        wait_for_group_ready "$group"
        return 0
    else
        log_operation "ERROR" "Failed to deploy service group '$group'"
        return 1
    fi
}
# Stop a service group
#
# Stops every service in the group in reverse deployment order so that
# dependents are shut down before their dependencies.
#
# Arguments: $1 - service group name
#            $2 - stop mode: --keep-data (default), --stop-only, --delete-all
# Returns:   0 if every service stopped successfully, 1 otherwise
stop_service_group() {
    local group=$1
    local mode=${2:-"--keep-data"} # --keep-data, --stop-only, --delete-all

    log_operation "INFO" "Stopping service group: $group (mode: $mode)"

    # Check the assignment directly instead of a separated "$?" (SC2181).
    local services
    if ! services=$(get_services_in_group "$group"); then
        return 1
    fi

    # Sort services in deployment order first. read -d '' -a splits the whole
    # input (including newlines) into words without glob expansion, unlike the
    # unquoted array assignment service_list=($sorted_services).
    local -a service_array=()
    read -r -d '' -a service_array <<< "$services" || true
    local sorted_services
    sorted_services=$(sort_services_by_deploy_order "${service_array[@]}")

    local -a service_list=()
    read -r -d '' -a service_list <<< "$sorted_services" || true

    # Reverse the order for graceful shutdown
    local -a reversed_services=()
    local i
    for ((i=${#service_list[@]}-1; i>=0; i--)); do
        reversed_services+=("${service_list[i]}")
    done

    local success=true
    local service
    for service in "${reversed_services[@]}"; do
        if ! stop_individual_service "$service" "$mode"; then
            success=false
        fi
    done

    if [[ "$success" == "true" ]]; then
        log_operation "SUCCESS" "Service group '$group' stopped successfully"
        return 0
    else
        log_operation "ERROR" "Failed to stop some services in group '$group'"
        return 1
    fi
}
# Start a service group (for stopped services)
#
# Starts every service in the group in deployment order.
#
# Arguments: $1 - service group name
# Returns:   0 if every service started successfully, 1 otherwise
start_service_group() {
    local group=$1

    log_operation "INFO" "Starting service group: $group"

    # Check the assignment directly instead of a separated "$?" (SC2181).
    local services
    if ! services=$(get_services_in_group "$group"); then
        return 1
    fi

    # Sort services by deployment order. read -d '' -a splits the whole
    # input into words without glob expansion.
    local -a service_array=()
    read -r -d '' -a service_array <<< "$services" || true
    local sorted_services
    sorted_services=$(sort_services_by_deploy_order "${service_array[@]}")

    # Iterate over an array rather than an unquoted string so the names
    # are never glob-expanded.
    local -a sorted_list=()
    read -r -d '' -a sorted_list <<< "$sorted_services" || true

    local success=true
    local service
    for service in "${sorted_list[@]}"; do
        if ! start_individual_service "$service"; then
            success=false
        fi
    done

    if [[ "$success" == "true" ]]; then
        log_operation "SUCCESS" "Service group '$group' started successfully"
        return 0
    else
        log_operation "ERROR" "Failed to start some services in group '$group'"
        return 1
    fi
}
# Deploy an individual service
#
# Applies the service's YAML manifest and waits for it to become ready.
#
# Globals:   K8S_CONFIG_DIR, K8S_NAMESPACE (read)
# Arguments: $1 - service name
#            $2 - optional group name (accepted for symmetry; not used here)
# Returns:   0 on success, 1 on failure
deploy_individual_service() {
    local service=$1
    local group=${2:-""}

    log_operation "INFO" "Deploying individual service: $service"

    # Get YAML file for the service; check the assignment directly instead
    # of the fragile separated "$?" test (ShellCheck SC2181).
    local yaml_file
    if ! yaml_file=$(get_yaml_file_for_service "$service"); then
        return 1
    fi

    local full_path="$K8S_CONFIG_DIR/$yaml_file"

    if [[ ! -f "$full_path" ]]; then
        log_operation "ERROR" "YAML file not found: $full_path"
        return 1
    fi

    # Check dependencies (non-fatal by design)
    if ! check_app_dependencies "$service"; then
        log_operation "WARNING" "Dependencies not satisfied, but proceeding with deployment"
    fi

    log_operation "INFO" "Applying YAML file: $yaml_file for service: $service"
    log_kubectl_command "kubectl apply -f $full_path"

    if kubectl apply -f "$full_path"; then
        log_operation "SUCCESS" "Successfully deployed service: $service"

        # Wait for service to be ready (180 s per-service timeout)
        wait_for_service_ready "$service" "$K8S_NAMESPACE" 180
        return 0
    else
        log_operation "ERROR" "Failed to deploy service: $service"
        return 1
    fi
}
# Stop an individual service
#
# Arguments: $1 - service name
#            $2 - stop mode: --keep-data (default), --stop-only, --delete-all
#
# Modes:
#   --keep-data / --stop-only : scale the deployment to 0 replicas
#   --delete-all              : delete the deployment and its Service object
#
# Returns: 0 on success (missing resources are logged as warnings, not
#          treated as errors), 1 on an unknown mode.
stop_individual_service() {
    local svc=$1
    local shutdown_mode=${2:-"--keep-data"}

    log_operation "INFO" "Stopping individual service: $svc (mode: $shutdown_mode)"

    case "$shutdown_mode" in
        "--keep-data")
            # Scale deployment to 0 but keep everything else
            log_kubectl_command "kubectl scale deployment $svc --replicas=0 -n $K8S_NAMESPACE"
            if ! kubectl scale deployment "$svc" --replicas=0 -n "$K8S_NAMESPACE" 2>/dev/null; then
                log_operation "WARNING" "Failed to scale down service: $svc (may not exist)"
            else
                log_operation "SUCCESS" "Scaled down service: $svc"
            fi
            ;;
        "--stop-only")
            # Same as keep-data for Kubernetes
            log_kubectl_command "kubectl scale deployment $svc --replicas=0 -n $K8S_NAMESPACE"
            if ! kubectl scale deployment "$svc" --replicas=0 -n "$K8S_NAMESPACE" 2>/dev/null; then
                log_operation "WARNING" "Failed to stop service: $svc (may not exist)"
            else
                log_operation "SUCCESS" "Stopped service: $svc"
            fi
            ;;
        "--delete-all")
            # Delete the deployment and associated resources
            log_kubectl_command "kubectl delete deployment $svc -n $K8S_NAMESPACE"
            if ! kubectl delete deployment "$svc" -n "$K8S_NAMESPACE" 2>/dev/null; then
                log_operation "WARNING" "Failed to delete deployment: $svc (may not exist)"
            else
                log_operation "SUCCESS" "Deleted deployment: $svc"
            fi

            # Also delete service if it exists (best-effort)
            log_kubectl_command "kubectl delete service ${svc}-service -n $K8S_NAMESPACE"
            kubectl delete service "${svc}-service" -n "$K8S_NAMESPACE" 2>/dev/null || true
            ;;
        *)
            log_operation "ERROR" "Unknown stop mode: $shutdown_mode"
            return 1
            ;;
    esac

    return 0
}
# Start an individual service (restore replicas)
#
# Scales an existing deployment back up; deploying from scratch is the
# job of deploy_individual_service.
#
# Globals:   K8S_NAMESPACE (read)
# Arguments: $1 - service name
# Returns:   0 on success, 1 if the deployment is missing or scaling fails
start_individual_service() {
    local svc=$1

    log_operation "INFO" "Starting individual service: $svc"

    # Guard clause: the deployment must already exist.
    if ! kubectl get deployment "$svc" -n "$K8S_NAMESPACE" &>/dev/null; then
        log_operation "ERROR" "Deployment '$svc' does not exist. Use deploy function instead."
        return 1
    fi

    # Replica count to restore: 1 by default; the worker services are the
    # known multi-replica deployments.
    local replicas=1
    if [[ "$svc" == "eveai-workers" || "$svc" == "eveai-chat-workers" ]]; then
        replicas=2
    fi

    log_kubectl_command "kubectl scale deployment $svc --replicas=$replicas -n $K8S_NAMESPACE"
    if ! kubectl scale deployment "$svc" --replicas="$replicas" -n "$K8S_NAMESPACE"; then
        log_operation "ERROR" "Failed to start service: $svc"
        return 1
    fi

    log_operation "SUCCESS" "Started service: $svc with $replicas replicas"

    # Wait for service to be ready (180 s timeout)
    wait_for_service_ready "$svc" "$K8S_NAMESPACE" 180
    return 0
}
# Wait for a service group to be ready
#
# Globals:   K8S_NAMESPACE (read)
# Arguments: $1 - service group name
#            $2 - per-service timeout in seconds (default 300)
# Returns:   0 when every service becomes ready, 1 otherwise
wait_for_group_ready() {
    local group=$1
    local timeout=${2:-300}

    log_operation "INFO" "Waiting for service group '$group' to be ready"

    # Check the assignment directly instead of a separated "$?" (SC2181).
    local services
    if ! services=$(get_services_in_group "$group"); then
        return 1
    fi

    # Split into an array so names are never glob-expanded.
    local -a service_list=()
    read -r -d '' -a service_list <<< "$services" || true

    local all_ready=true
    local service
    for service in "${service_list[@]}"; do
        if ! wait_for_service_ready "$service" "$K8S_NAMESPACE" "$timeout"; then
            all_ready=false
            log_operation "WARNING" "Service '$service' in group '$group' failed to become ready"
        fi
    done

    if [[ "$all_ready" == "true" ]]; then
        log_operation "SUCCESS" "All services in group '$group' are ready"
        return 0
    else
        log_operation "ERROR" "Some services in group '$group' failed to become ready"
        return 1
    fi
}
# Get service status
#
# Prints exactly one of: NOT_DEPLOYED, STOPPED, RUNNING, PARTIAL, STARTING.
#
# Arguments: $1 - service name
#            $2 - namespace (defaults to $K8S_NAMESPACE)
# Returns:   1 when the deployment does not exist, 0 otherwise
get_service_status() {
    local svc=$1
    local ns=${2:-$K8S_NAMESPACE}

    if ! kubectl get deployment "$svc" -n "$ns" &>/dev/null; then
        echo "NOT_DEPLOYED"
        return 1
    fi

    local ready desired
    ready=$(kubectl get deployment "$svc" -n "$ns" -o jsonpath='{.status.readyReplicas}' 2>/dev/null)
    desired=$(kubectl get deployment "$svc" -n "$ns" -o jsonpath='{.spec.replicas}' 2>/dev/null)

    # Treat missing/empty jsonpath values as zero.
    ready=${ready:-0}
    desired=${desired:-0}

    if [[ "$desired" -eq 0 ]]; then
        echo "STOPPED"
    elif [[ "$ready" -eq "$desired" && "$ready" -gt 0 ]]; then
        echo "RUNNING"
    elif [[ "$ready" -gt 0 ]]; then
        echo "PARTIAL"
    else
        echo "STARTING"
    fi
}
# Show detailed service status
#
# With an argument, prints details for that single service (deployment,
# pods, recent events); with no argument, prints a one-line status
# summary for every service in the "all" group.
#
# Globals:   K8S_NAMESPACE (read)
# Arguments: $1 - optional service name
show_service_status() {
    local target=${1:-""}

    if [[ -z "$target" ]]; then
        # Show status for all services
        echo "🔍 Service Status Overview:"
        echo "=========================="

        local all_services
        all_services=$(get_services_in_group "all")

        local svc status icon
        for svc in $all_services; do
            status=$(get_service_status "$svc")

            case "$status" in
                "RUNNING") icon="✅" ;;
                "PARTIAL") icon="⚠️" ;;
                "STARTING") icon="🔄" ;;
                "STOPPED") icon="⏹️" ;;
                "NOT_DEPLOYED") icon="❌" ;;
                *) icon="❓" ;;
            esac

            echo " $svc: $icon $status"
        done
        return
    fi

    # Show status for specific service
    echo "🔍 Status for service: $target"
    echo "================================"

    local status
    status=$(get_service_status "$target")
    echo "Status: $status"

    if ! kubectl get deployment "$target" -n "$K8S_NAMESPACE" &>/dev/null; then
        echo "Deployment not found"
        return
    fi

    echo ""
    echo "Deployment details:"
    kubectl get deployment "$target" -n "$K8S_NAMESPACE"

    echo ""
    echo "Pod details:"
    kubectl get pods -l "app=$target" -n "$K8S_NAMESPACE"

    echo ""
    echo "Recent events:"
    kubectl get events --field-selector involvedObject.name="$target" -n "$K8S_NAMESPACE" --sort-by='.lastTimestamp' | tail -5
}
# Restart a service (stop and start)
#
# Scales the service down, pauses briefly, then scales it back up.
#
# Arguments: $1 - service name
# Returns:   0 on success, 1 if either phase fails
restart_service() {
    local svc=$1

    log_operation "INFO" "Restarting service: $svc"

    stop_individual_service "$svc" "--stop-only" || {
        log_operation "ERROR" "Failed to stop service: $svc"
        return 1
    }

    # Give the cluster a moment to tear the pods down before scaling up.
    sleep 5

    start_individual_service "$svc" || {
        log_operation "ERROR" "Failed to start service: $svc"
        return 1
    }

    log_operation "SUCCESS" "Successfully restarted service: $svc"
}
# Export functions for use in other scripts
#
# Under bash, `export -f` marks each function for inheritance by subshells
# and child bash processes.
#
# NOTE(review): in the zsh branch, `typeset -f name` prints the function
# definition, so redirecting to /dev/null appears to make this branch a
# deliberate no-op placeholder (zsh has no `export -f` equivalent) —
# confirm this is intentional. The shebang is bash, so the zsh branch only
# matters when this file is sourced from a zsh shell.
if [[ -n "$ZSH_VERSION" ]]; then
    typeset -f deploy_service_group stop_service_group start_service_group > /dev/null
    typeset -f deploy_individual_service stop_individual_service start_individual_service > /dev/null
    typeset -f wait_for_group_ready get_service_status show_service_status restart_service > /dev/null
else
    export -f deploy_service_group stop_service_group start_service_group
    export -f deploy_individual_service stop_individual_service start_individual_service
    export -f wait_for_group_ready get_service_status show_service_status restart_service
fi