diff --git a/common/utils/startup_eveai.py b/common/utils/startup_eveai.py
index 77107be..f5a9d93 100644
--- a/common/utils/startup_eveai.py
+++ b/common/utils/startup_eveai.py
@@ -6,7 +6,8 @@ from common.extensions import cache_manager
 
 
 def perform_startup_actions(app):
-    perform_startup_invalidation(app)
+    pass
+    # perform_startup_invalidation(app)
 
 
 def perform_startup_invalidation(app):
diff --git a/config/config.py b/config/config.py
index 6bee888..df3b03a 100644
--- a/config/config.py
+++ b/config/config.py
@@ -326,7 +326,7 @@ class DevConfig(Config):
     EXPLAIN_TEMPLATE_LOADING = False
 
     # Define the nginx prefix used for the specific apps
-    EVEAI_APP_LOCATION_PREFIX = '/admin'
+    EVEAI_APP_LOCATION_PREFIX = ''
     EVEAI_CHAT_LOCATION_PREFIX = '/chat'
     CHAT_CLIENT_PREFIX = 'chat-client/chat/'
 
diff --git a/eveai_app/__init__.py b/eveai_app/__init__.py
index 90cba33..999c2e4 100644
--- a/eveai_app/__init__.py
+++ b/eveai_app/__init__.py
@@ -12,7 +12,6 @@ from common.models.user import User, Role, Tenant, TenantDomain
 import common.models.interaction
 import common.models.entitlements
 import common.models.document
-from common.utils.startup_eveai import perform_startup_actions
 from config.logging_config import configure_logging
 from common.utils.security import set_tenant_session_data
 from common.utils.errors import register_error_handlers
@@ -165,9 +164,6 @@
     # agent_config = cache_manager.agent_config_cache.get_config('RAG_AGENT')
     # app.logger.debug(f"Agent config: {agent_config}")
 
-    # Perform startup actions such as cache invalidation
-    perform_startup_actions(app)
-
     app.logger.info(f"EveAI App Server Started Successfully (PID: {os.getpid()})")
     app.logger.info("-------------------------------------------------------------------------------------------------")
     return app
@@ -189,20 +185,21 @@ def register_extensions(app):
 
 
 def register_blueprints(app):
+    prefix = app.config.get('EVEAI_APP_LOCATION_PREFIX', '/admin')
     from .views.user_views import user_bp
-    app.register_blueprint(user_bp)
+    app.register_blueprint(user_bp, url_prefix=prefix)
     from .views.basic_views import basic_bp
-    app.register_blueprint(basic_bp)
+    app.register_blueprint(basic_bp, url_prefix=prefix)
     from .views.document_views import document_bp
-    app.register_blueprint(document_bp)
+    app.register_blueprint(document_bp, url_prefix=prefix)
     from .views.security_views import security_bp
-    app.register_blueprint(security_bp)
+    app.register_blueprint(security_bp, url_prefix=prefix)
     from .views.interaction_views import interaction_bp
-    app.register_blueprint(interaction_bp)
+    app.register_blueprint(interaction_bp, url_prefix=prefix)
     from .views.entitlements_views import entitlements_bp
-    app.register_blueprint(entitlements_bp)
+    app.register_blueprint(entitlements_bp, url_prefix=prefix)
     from .views.partner_views import partner_bp
-    app.register_blueprint(partner_bp)
+    app.register_blueprint(partner_bp, url_prefix=prefix)
     from .views.healthz_views import healthz_bp, init_healtz
     app.register_blueprint(healthz_bp)
     init_healtz(app)
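Note on the register_blueprints() change above: every admin blueprint is now registered under a single url_prefix read from EVEAI_APP_LOCATION_PREFIX, so the DevConfig change from '/admin' to '' serves the admin views at the application root instead of relying on the nginx location prefix. A minimal sketch of that behaviour, outside the patch and illustrative only, using a hypothetical blueprint rather than the real eveai_app views:

from flask import Flask, Blueprint

# Hypothetical stand-in for one of the eveai_app view blueprints.
demo_bp = Blueprint('demo', __name__)


@demo_bp.route('/users')
def users():
    return 'user list'


def create_demo_app(location_prefix):
    app = Flask(__name__)
    app.config['EVEAI_APP_LOCATION_PREFIX'] = location_prefix
    # Mirrors the pattern in register_blueprints(): read the prefix once,
    # apply it to every blueprint registration.
    prefix = app.config.get('EVEAI_APP_LOCATION_PREFIX', '/admin')
    app.register_blueprint(demo_bp, url_prefix=prefix)
    return app


# With prefix '' the route is served at /users; with '/admin' it becomes /admin/users.
for p in ('', '/admin'):
    rules = [str(r) for r in create_demo_app(p).url_map.iter_rules() if r.endpoint != 'static']
    print(repr(p), rules)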
diff --git a/eveai_chat_client/__init__.py b/eveai_chat_client/__init__.py
index dc7e3b0..b20c7b3 100644
--- a/eveai_chat_client/__init__.py
+++ b/eveai_chat_client/__init__.py
@@ -8,7 +8,6 @@ import logging.config
 from common.extensions import (db, bootstrap, cors, csrf, session, minio_client, simple_encryption, metrics,
                                cache_manager, content_manager)
 from common.models.user import Tenant, SpecialistMagicLinkTenant
-from common.utils.startup_eveai import perform_startup_actions
 from config.logging_config import configure_logging
 from eveai_chat_client.utils.errors import register_error_handlers
 from common.utils.celery_utils import make_celery, init_celery
@@ -82,9 +81,6 @@
     # Register template filters
     register_filters(app)
 
-    # Perform startup actions such as cache invalidation
-    perform_startup_actions(app)
-
     app.logger.info(f"EveAI Chat Client Started Successfully (PID: {os.getpid()})")
     app.logger.info("-------------------------------------------------------------------------------------------------")
 
diff --git a/scaleway/easy_deploy/promtail.yaml b/scaleway/easy_deploy/promtail.yaml
new file mode 100644
index 0000000..41fc0d7
--- /dev/null
+++ b/scaleway/easy_deploy/promtail.yaml
@@ -0,0 +1,415 @@
+_NOTES: |
+  ↓ Please scroll down if needed to see the pre-defined values ↓
+
+  Full values are available at: https://github.com/grafana/helm-charts/tree/main/charts/promtail
+
+  # Billing warning
+
+  After installing this Chart, a Cockpit custom Data source will be created.
+  This feature does incur costs based on the volume of logs ingested.
+
+  # Template functions documentation
+
+  cockpit_bearer_token
+    Generates a Cockpit token named "k8s-logs-CLUSTER_ID" with "Push logs"
+    permission and returns it. Any existing token with this name will be
+    deleted.
+
+  cockpit_loki_push_url
+    Creates a Cockpit Logs Data source named "kubernetes-logs" (if needed)
+    and returns its push URL.
+
+  cockpit_promtail_scrape_config_pods
+    Returns a Promtail scrape config for pushing Pod logs to Cockpit.
+    ALL logs are scraped if no argument is provided. To only scrape logs
+    from specific namespaces, you can provide the namespaces as arguments.
+    Here are some examples:
+      - cockpit_promtail_scrape_config_pods
+      - cockpit_promtail_scrape_config_pods "kube-system"
+      - cockpit_promtail_scrape_config_pods "kube-system" "default" "my-app"
+
+  cockpit_promtail_scrape_config_journal
+    Returns a Promtail scrape config for pushing Node system logs to
+    Cockpit. ALL logs are scraped if no argument is provided. To only scrape
+    logs from specific namespaces you can provide the syslog identifiers
+    as arguments. Here are some examples:
+      - cockpit_promtail_scrape_config_journal
+      - cockpit_promtail_scrape_config_journal "kubelet"
+      - cockpit_promtail_scrape_config_journal "kubelet" "sshd" "systemd"
+affinity: {}
+annotations: {}
+automountServiceAccountToken: true
+config:
+  clients:
+  - bearer_token: 6gx1HemmAehGC9q0EHBfCRrQDyjfCttvypMSkEXXR43qseiKfTSBIdVUhKrH_Lfb
+    url: https://f191356f-5685-4ed9-a1e6-46541ecb560a.logs.cockpit.fr-par.scw.cloud/loki/api/v1/push
+  enableTracing: false
+  enabled: true
+  file: |
+    server:
+      log_level: {{ .Values.config.logLevel }}
+      log_format: {{ .Values.config.logFormat }}
+      http_listen_port: {{ .Values.config.serverPort }}
+      {{- with .Values.httpPathPrefix }}
+      http_path_prefix: {{ . }}
+      {{- end }}
+      {{- tpl .Values.config.snippets.extraServerConfigs . | nindent 2 }}
+
+    clients:
+      {{- tpl (toYaml .Values.config.clients) . | nindent 2 }}
+
+    positions:
+      {{- tpl (toYaml .Values.config.positions) . | nindent 2 }}
+
+    scrape_configs:
+      {{- tpl .Values.config.snippets.scrapeConfigs . | nindent 2 }}
+      {{- tpl .Values.config.snippets.extraScrapeConfigs . | nindent 2 }}
+
+    limits_config:
+      {{- tpl .Values.config.snippets.extraLimitsConfig . | nindent 2 }}
+
+    tracing:
+      enabled: {{ .Values.config.enableTracing }}
+  logFormat: logfmt
+  logLevel: info
+  positions:
+    filename: /run/promtail/positions.yaml
+  serverPort: 3101
+  snippets:
+    addScrapeJobLabel: false
+    common:
+    - action: replace
+      source_labels:
+      - __meta_kubernetes_pod_node_name
+      target_label: node_name
+    - action: replace
+      source_labels:
+      - __meta_kubernetes_namespace
+      target_label: namespace
+    - action: replace
+      replacement: $1
+      separator: /
+      source_labels:
+      - namespace
+      - app
+      target_label: job
+    - action: replace
+      source_labels:
+      - __meta_kubernetes_pod_name
+      target_label: pod
+    - action: replace
+      source_labels:
+      - __meta_kubernetes_pod_container_name
+      target_label: container
+    - action: replace
+      replacement: /var/log/pods/*$1/*.log
+      separator: /
+      source_labels:
+      - __meta_kubernetes_pod_uid
+      - __meta_kubernetes_pod_container_name
+      target_label: __path__
+    - action: replace
+      regex: true/(.*)
+      replacement: /var/log/pods/*$1/*.log
+      separator: /
+      source_labels:
+      - __meta_kubernetes_pod_annotationpresent_kubernetes_io_config_hash
+      - __meta_kubernetes_pod_annotation_kubernetes_io_config_hash
+      - __meta_kubernetes_pod_container_name
+      target_label: __path__
+    extraLimitsConfig: |
+      # When true, enforces rate limiting on this instance of Promtail.
+      readline_rate_enabled: true
+      # The rate limit in log lines per second that this instance of Promtail may push to Cockpit.
+      readline_rate: 10000
+      # The cap in the quantity of burst lines that this instance of Promtail may push to Cockpit.
+      readline_burst: 10000
+      # When true, exceeding the rate limit causes this instance of Promtail to discard
+      # log lines, rather than sending them to Cockpit. When false, exceeding the rate limit
+      # causes this instance of Promtail to temporarily hold off on sending the log lines and retry later.
+      readline_rate_drop: true
+    extraRelabelConfigs: []
+    extraScrapeConfigs: ""
+    extraServerConfigs: ""
+    pipelineStages:
+    - cri: {}
+    scrapeConfigs: |
+      - job_name: kubernetes-pods
+        pipeline_stages:
+          - cri: {}
+          - labeldrop:
+              - filename
+              - stream
+
+          # -- BEGIN: filters to drop health/probe noise --
+          # 1) Drop all requests with a kube-probe user agent
+          - drop:
+              expression: 'kube-probe'
+
+          # 2) Drop all health endpoints (with or without an underscore, with or without a suffix)
+          #    Matches: /healthz, /healthz/ready, /healthz/live, /_healthz, /_healthz/ready, /_healthz/live
+          - drop:
+              expression: '(/_?healthz(?:/ready|/live)?\b)'
+          # -- END: filters --
+        kubernetes_sd_configs:
+          - role: pod
+        relabel_configs:
+          # filter
+          - action: keep
+            source_labels:
+              - __meta_kubernetes_namespace
+            regex: .*
+          # static labels
+          - action: replace
+            replacement: eveai-staging
+            target_label: cluster
+          - action: replace
+            replacement: pod
+            target_label: type
+          - action: replace
+            replacement: easydeploy-promtail
+            target_label: from
+          # dynamic labels
+          - action: replace
+            source_labels:
+              - __meta_kubernetes_namespace
+            target_label: namespace
+          - action: replace
+            source_labels:
+              - __meta_kubernetes_pod_name
+            target_label: pod
+          - action: replace
+            source_labels:
+              - __meta_kubernetes_pod_container_name
+            target_label: container
+          - action: replace
+            replacement: /var/log/pods/*$1/*.log
+            separator: /
+            source_labels:
+              - __meta_kubernetes_pod_uid
+              - __meta_kubernetes_pod_container_name
+            target_label: __path__
+          - action: replace
+            replacement: /var/log/pods/*$1/*.log
+            regex: true/(.*)
+            separator: /
+            source_labels:
+              - __meta_kubernetes_pod_annotationpresent_kubernetes_io_config_hash
+              - __meta_kubernetes_pod_annotation_kubernetes_io_config_hash
+              - __meta_kubernetes_pod_container_name
+            target_label: __path__
+
+      - job_name: journal
+        journal:
+          json: false
+          max_age: 12h
+          path: /var/log/journal
+        relabel_configs:
+          # filter
+          - source_labels: ["__journal_syslog_identifier"]
+            regex: .*
+            action: keep
+          # static labels
+          - action: replace
+            replacement: eveai-staging
+            target_label: cluster
+          - action: replace
+            replacement: journal
+            target_label: type
+          - action: replace
+            replacement: easydeploy-promtail
+            target_label: from
+          # dynamic labels
+          - source_labels: ["__journal__hostname"]
+            target_label: host
+          - source_labels: ["__journal_syslog_identifier"]
+            target_label: syslog_identifier
+configmap:
+  enabled: false
+containerSecurityContext:
+  allowPrivilegeEscalation: false
+  capabilities:
+    drop:
+    - ALL
+  readOnlyRootFilesystem: true
+daemonset:
+  autoscaling:
+    controlledResources: []
+    enabled: false
+    maxAllowed: {}
+    minAllowed: {}
+  enabled: true
+defaultVolumeMounts:
+- mountPath: /run/promtail
+  name: run
+- mountPath: /var/lib/docker/containers
+  name: containers
+  readOnly: true
+- mountPath: /var/log/pods
+  name: pods
+  readOnly: true
+defaultVolumes:
+- hostPath:
+    path: /run/promtail
+  name: run
+- hostPath:
+    path: /var/lib/docker/containers
+  name: containers
+- hostPath:
+    path: /var/log/pods
+  name: pods
+deployment:
+  autoscaling:
+    enabled: false
+    maxReplicas: 10
+    minReplicas: 1
+    targetCPUUtilizationPercentage: 80
+    targetMemoryUtilizationPercentage: null
+  enabled: false
+  replicaCount: 1
+  strategy:
+    type: RollingUpdate
+enableServiceLinks: true
+extraArgs: []
+extraContainers: {}
+extraEnv: []
+extraEnvFrom: []
+extraObjects: []
+extraPorts: {}
+extraVolumeMounts:
+- mountPath: /var/log/journal
+  name: journal
+  readOnly: true
+extraVolumes:
+- hostPath:
+    path: /var/log/journal
+  name: journal
+fullnameOverride: null
+global:
+  imagePullSecrets: []
+  imageRegistry: ""
+hostAliases: []
+hostNetwork: null
+httpPathPrefix: ""
+image:
+  pullPolicy: IfNotPresent
+  registry: docker.io
+  repository: grafana/promtail
+  tag: ""
+imagePullSecrets: []
+initContainer: []
+livenessProbe: {}
+nameOverride: null
+namespace: null
+networkPolicy:
+  enabled: false
+  k8sApi:
+    cidrs: []
+    port: 8443
+  metrics:
+    cidrs: []
+    namespaceSelector: {}
+    podSelector: {}
+nodeSelector: {}
+podAnnotations: {}
+podLabels: {}
+podSecurityContext:
+  runAsGroup: 0
+  runAsUser: 0
+podSecurityPolicy:
+  allowPrivilegeEscalation: true
+  fsGroup:
+    rule: RunAsAny
+  hostIPC: false
+  hostNetwork: false
+  hostPID: false
+  privileged: true
+  readOnlyRootFilesystem: true
+  requiredDropCapabilities:
+  - ALL
+  runAsUser:
+    rule: RunAsAny
+  seLinux:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  volumes:
+  - secret
+  - hostPath
+  - downwardAPI
+priorityClassName: null
+rbac:
+  create: true
+  pspEnabled: false
+readinessProbe:
+  failureThreshold: 5
+  httpGet:
+    path: '{{ printf `%s/ready` .Values.httpPathPrefix }}'
+    port: http-metrics
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  successThreshold: 1
+  timeoutSeconds: 1
+resources: {}
+secret:
+  annotations: {}
+  labels: {}
+service:
+  annotations: {}
+  enabled: false
+  labels: {}
+serviceAccount:
+  annotations: {}
+  automountServiceAccountToken: true
+  create: true
+  imagePullSecrets: []
+  name: null
+serviceMonitor:
+  annotations: {}
+  enabled: false
+  interval: null
+  labels: {}
+  metricRelabelings: []
+  namespace: null
+  namespaceSelector: {}
+  prometheusRule:
+    additionalLabels: {}
+    enabled: false
+    rules: []
+  relabelings: []
+  scheme: http
+  scrapeTimeout: null
+  targetLabels: []
+  tlsConfig: null
+sidecar:
+  configReloader:
+    config:
+      serverPort: 9533
+    containerSecurityContext:
+      allowPrivilegeEscalation: false
+      capabilities:
+        drop:
+        - ALL
+      readOnlyRootFilesystem: true
+    enabled: false
+    extraArgs: []
+    extraEnv: []
+    extraEnvFrom: []
+    image:
+      pullPolicy: IfNotPresent
+      registry: ghcr.io
+      repository: jimmidyson/configmap-reload
+      tag: v0.12.0
+    livenessProbe: {}
+    readinessProbe: {}
+    resources: {}
+    serviceMonitor:
+      enabled: true
+tolerations:
+- effect: NoSchedule
+  key: node-role.kubernetes.io/master
+  operator: Exists
+- effect: NoSchedule
+  key: node-role.kubernetes.io/control-plane
+  operator: Exists
+updateStrategy: {}
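The health-endpoint filter added in the kubernetes-pods scrape job above hinges on the regular expression '(/_?healthz(?:/ready|/live)?\b)'. The quick check below is an illustration only: the sample log lines are hypothetical, and Python's re module is used as an approximation of the RE2 matching Promtail applies in its drop stage.

import re

# Same expression as in the Promtail drop stage above.
HEALTH_RE = re.compile(r'(/_?healthz(?:/ready|/live)?\b)')

# Hypothetical sample lines, not taken from a real workload.
samples = [
    'GET /healthz HTTP/1.1" 200',
    'GET /_healthz/ready HTTP/1.1" 200',
    'GET /healthz/live HTTP/1.1" 200',
    'GET /chat/api/messages HTTP/1.1" 200',
    'GET /healthzebra HTTP/1.1" 200',  # the trailing \b keeps this one from matching
]

for line in samples:
    action = 'drop' if HEALTH_RE.search(line) else 'keep'
    print(f'{action}: {line}')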