- logging improvement and simplification (no more graylog)

- Traicie Selection Specialist Round Trip
- Session improvements + debugging enabled
- Tone of Voice & Language Level definitions introduced
This commit is contained in:
Josako
2025-06-20 07:58:06 +02:00
parent babcd6ec04
commit 5b2c04501c
29 changed files with 916 additions and 167 deletions

67
README.md.k8s-logging Normal file
View File

@@ -0,0 +1,67 @@
# Kubernetes Logging Upgrade
## Overzicht
Deze instructies beschrijven hoe je alle services moet bijwerken om de nieuwe logging configuratie te gebruiken die zowel compatibel is met traditionele bestandsgebaseerde logging (voor ontwikkeling/test) als met Kubernetes (voor productie).
## Stappen voor elke service
Pas de volgende wijzigingen toe in elk van de volgende services:
- eveai_app
- eveai_workers
- eveai_api
- eveai_chat_client
- eveai_chat_workers
- eveai_beat
- eveai_entitlements
### 1. Update de imports
Verander:
```python
from config.logging_config import LOGGING
```
Naar:
```python
from config.logging_config import configure_logging
```
### 2. Update de logging configuratie
Verander:
```python
logging.config.dictConfig(LOGGING)
```
Naar:
```python
configure_logging()
```
## Dockerfile Aanpassingen
Voeg de volgende regels toe aan je Dockerfile voor elke service om de Kubernetes-specifieke logging afhankelijkheden te installeren (alleen voor productie):
```dockerfile
# Alleen voor productie (Kubernetes) builds
COPY requirements-k8s.txt /app/
RUN if [ "$ENVIRONMENT" = "production" ]; then pip install -r requirements-k8s.txt; fi
```
## Kubernetes Deployment
Zorg ervoor dat je Kubernetes deployment manifests de volgende omgevingsvariabele bevatten:
```yaml
env:
- name: FLASK_ENV
value: "production"
```
## Voordelen
1. De code detecteert automatisch of deze in Kubernetes draait
2. In ontwikkeling/test omgevingen blijft alles naar bestanden schrijven
3. In Kubernetes gaan logs naar stdout/stderr in JSON-formaat
4. Geen wijzigingen nodig in bestaande logger code in de applicatie

View File

@@ -1,5 +1,5 @@
version: "1.0.0" version: "1.0.0"
name: "Traicie HR BP " name: "Traicie Recruiter"
role: > role: >
You are an Expert Recruiter working for {tenant_name} You are an Expert Recruiter working for {tenant_name}
{custom_role} {custom_role}
@@ -20,6 +20,6 @@ full_model_name: "mistral.mistral-medium-latest"
temperature: 0.3 temperature: 0.3
metadata: metadata:
author: "Josako" author: "Josako"
date_added: "2025-05-21" date_added: "2025-06-18"
description: "HR BP Agent." description: "HR BP Agent."
changes: "Initial version" changes: "Initial version"

View File

@@ -12,10 +12,7 @@ class Config(object):
DEBUG = False DEBUG = False
DEVELOPMENT = False DEVELOPMENT = False
SECRET_KEY = environ.get('SECRET_KEY') SECRET_KEY = environ.get('SECRET_KEY')
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
COMPONENT_NAME = environ.get('COMPONENT_NAME') COMPONENT_NAME = environ.get('COMPONENT_NAME')
SESSION_KEY_PREFIX = f'{COMPONENT_NAME}_'
# Database Settings # Database Settings
DB_HOST = environ.get('DB_HOST') DB_HOST = environ.get('DB_HOST')
@@ -44,8 +41,6 @@ class Config(object):
# SECURITY_POST_CHANGE_VIEW = '/admin/login' # SECURITY_POST_CHANGE_VIEW = '/admin/login'
# SECURITY_BLUEPRINT_NAME = 'security_bp' # SECURITY_BLUEPRINT_NAME = 'security_bp'
SECURITY_PASSWORD_SALT = environ.get('SECURITY_PASSWORD_SALT') SECURITY_PASSWORD_SALT = environ.get('SECURITY_PASSWORD_SALT')
REMEMBER_COOKIE_SAMESITE = 'strict'
SESSION_COOKIE_SAMESITE = 'Lax'
SECURITY_CONFIRMABLE = True SECURITY_CONFIRMABLE = True
SECURITY_TRACKABLE = True SECURITY_TRACKABLE = True
SECURITY_PASSWORD_COMPLEXITY_CHECKER = 'zxcvbn' SECURITY_PASSWORD_COMPLEXITY_CHECKER = 'zxcvbn'
@@ -56,6 +51,10 @@ class Config(object):
SECURITY_EMAIL_SUBJECT_PASSWORD_NOTICE = 'Your Password Has Been Reset' SECURITY_EMAIL_SUBJECT_PASSWORD_NOTICE = 'Your Password Has Been Reset'
SECURITY_EMAIL_PLAINTEXT = False SECURITY_EMAIL_PLAINTEXT = False
SECURITY_EMAIL_HTML = True SECURITY_EMAIL_HTML = True
SECURITY_SESSION_PROTECTION = 'basic' # of 'basic' als 'strong' problemen geeft
SECURITY_REMEMBER_TOKEN_VALIDITY = timedelta(minutes=60) # Zelfde als session lifetime
SECURITY_AUTO_LOGIN_AFTER_CONFIRM = True
SECURITY_AUTO_LOGIN_AFTER_RESET = True
# Ensure Flask-Security-Too is handling CSRF tokens when behind a proxy # Ensure Flask-Security-Too is handling CSRF tokens when behind a proxy
SECURITY_CSRF_PROTECT_MECHANISMS = ['session'] SECURITY_CSRF_PROTECT_MECHANISMS = ['session']
@@ -189,6 +188,15 @@ class Config(object):
PERMANENT_SESSION_LIFETIME = timedelta(minutes=60) PERMANENT_SESSION_LIFETIME = timedelta(minutes=60)
SESSION_REFRESH_EACH_REQUEST = True SESSION_REFRESH_EACH_REQUEST = True
SESSION_COOKIE_NAME = f'{COMPONENT_NAME}_session'
SESSION_COOKIE_DOMAIN = None # Laat Flask dit automatisch bepalen
SESSION_COOKIE_PATH = '/'
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = False # True voor production met HTTPS
SESSION_COOKIE_SAMESITE = 'Lax'
REMEMBER_COOKIE_SAMESITE = 'strict'
SESSION_KEY_PREFIX = f'{COMPONENT_NAME}_'
# JWT settings # JWT settings
JWT_SECRET_KEY = environ.get('JWT_SECRET_KEY') JWT_SECRET_KEY = environ.get('JWT_SECRET_KEY')
JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=1) # Set token expiry to 1 hour JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=1) # Set token expiry to 1 hour

View File

@@ -1,15 +1,13 @@
import json import json
import os import os
import sys
from datetime import datetime as dt, timezone as tz from datetime import datetime as dt, timezone as tz
from flask import current_app from flask import current_app
from graypy import GELFUDPHandler
import logging import logging
import logging.config import logging.config
# Graylog configuration
GRAYLOG_HOST = os.environ.get('GRAYLOG_HOST', 'localhost')
GRAYLOG_PORT = int(os.environ.get('GRAYLOG_PORT', 12201))
env = os.environ.get('FLASK_ENV', 'development') env = os.environ.get('FLASK_ENV', 'development')
@@ -144,23 +142,6 @@ class TuningFormatter(logging.Formatter):
return formatted_msg return formatted_msg
class GraylogFormatter(logging.Formatter):
"""Maintains existing Graylog formatting while adding tuning fields"""
def format(self, record):
if getattr(record, 'is_tuning_log', False):
# Add tuning-specific fields to Graylog
record.tuning_fields = {
'is_tuning_log': True,
'tuning_type': record.tuning_type,
'tenant_id': record.tenant_id,
'catalog_id': record.catalog_id,
'specialist_id': record.specialist_id,
'retriever_id': record.retriever_id,
'processor_id': record.processor_id,
'session_id': record.session_id,
}
return super().format(record)
class TuningLogger: class TuningLogger:
"""Helper class to manage tuning logs with consistent structure""" """Helper class to manage tuning logs with consistent structure"""
@@ -177,10 +158,10 @@ class TuningLogger:
specialist_id: Optional specialist ID for context specialist_id: Optional specialist ID for context
retriever_id: Optional retriever ID for context retriever_id: Optional retriever ID for context
processor_id: Optional processor ID for context processor_id: Optional processor ID for context
session_id: Optional session ID for context and log file naming session_id: Optional session ID for context
log_file: Optional custom log file name to use log_file: Optional custom log file name (ignored - all logs go to tuning.log)
""" """
# Always use the standard tuning logger
self.logger = logging.getLogger(logger_name) self.logger = logging.getLogger(logger_name)
self.tenant_id = tenant_id self.tenant_id = tenant_id
self.catalog_id = catalog_id self.catalog_id = catalog_id
@@ -188,63 +169,8 @@ class TuningLogger:
self.retriever_id = retriever_id self.retriever_id = retriever_id
self.processor_id = processor_id self.processor_id = processor_id
self.session_id = session_id self.session_id = session_id
self.log_file = log_file
# Determine whether to use a session-specific logger
if session_id:
# Create a unique logger name for this session
session_logger_name = f"{logger_name}_{session_id}"
self.logger = logging.getLogger(session_logger_name)
# If this logger doesn't have handlers yet, configure it def log_tuning(self, tuning_type: str, message: str, data=None, level=logging.DEBUG):
if not self.logger.handlers:
# Determine log file path
if not log_file and session_id:
log_file = f"logs/tuning_{session_id}.log"
elif not log_file:
log_file = "logs/tuning.log"
# Configure the logger
self._configure_session_logger(log_file)
else:
# Use the standard tuning logger
self.logger = logging.getLogger(logger_name)
def _configure_session_logger(self, log_file):
"""Configure a new session-specific logger with appropriate handlers"""
# Create and configure a file handler
file_handler = logging.handlers.RotatingFileHandler(
filename=log_file,
maxBytes=1024 * 1024 * 3, # 3MB
backupCount=3
)
file_handler.setFormatter(TuningFormatter())
file_handler.setLevel(logging.DEBUG)
# Add the file handler to the logger
self.logger.addHandler(file_handler)
# Add Graylog handler in production
env = os.environ.get('FLASK_ENV', 'development')
if env == 'production':
try:
graylog_handler = GELFUDPHandler(
host=GRAYLOG_HOST,
port=GRAYLOG_PORT,
debugging_fields=True
)
graylog_handler.setFormatter(GraylogFormatter())
self.logger.addHandler(graylog_handler)
except Exception as e:
# Fall back to just file logging if Graylog setup fails
fallback_logger = logging.getLogger('eveai_app')
fallback_logger.warning(f"Failed to set up Graylog handler: {str(e)}")
# Set logger level and disable propagation
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
def log_tuning(self, tuning_type: str, message: str, data=None, level=logging.DEBUG):
"""Log a tuning event with structured data""" """Log a tuning event with structured data"""
try: try:
# Create a standard LogRecord for tuning # Create a standard LogRecord for tuning
@@ -275,13 +201,82 @@ def log_tuning(self, tuning_type: str, message: str, data=None, level=logging.DE
self.logger.handle(record) self.logger.handle(record)
except Exception as e: except Exception as e:
fallback_logger = logging.getLogger('eveai_workers') print(f"Failed to log tuning message: {str(e)}")
fallback_logger.exception(f"Failed to log tuning message: {str(e)}")
# Set the custom log record factory # Set the custom log record factory
logging.setLogRecordFactory(TuningLogRecord) logging.setLogRecordFactory(TuningLogRecord)
def configure_logging():
"""Configure logging based on environment
When running in Kubernetes, directs logs to stdout in JSON format
Otherwise uses file-based logging for development/testing
"""
try:
# Verkrijg het absolute pad naar de logs directory
base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
logs_dir = os.path.join(base_dir, 'logs')
# Zorg ervoor dat de logs directory bestaat met de juiste permissies
if not os.path.exists(logs_dir):
try:
os.makedirs(logs_dir, exist_ok=True)
print(f"Logs directory aangemaakt op: {logs_dir}")
except (IOError, PermissionError) as e:
print(f"WAARSCHUWING: Kan logs directory niet aanmaken: {e}")
print(f"Logs worden mogelijk niet correct geschreven!")
# Check if running in Kubernetes
in_kubernetes = os.environ.get('KUBERNETES_SERVICE_HOST') is not None
# Controleer of de pythonjsonlogger pakket beschikbaar is als we in Kubernetes zijn
if in_kubernetes:
try:
import pythonjsonlogger.jsonlogger
has_json_logger = True
except ImportError:
print("WAARSCHUWING: python-json-logger pakket is niet geïnstalleerd.")
print("Voer 'pip install python-json-logger>=2.0.7' uit om JSON logging in te schakelen.")
print("Terugvallen op standaard logging formaat.")
has_json_logger = False
in_kubernetes = False # Fall back to standard logging
else:
has_json_logger = False
# Apply the configuration
logging_config = dict(LOGGING)
# Wijzig de json_console handler om terug te vallen op console als pythonjsonlogger niet beschikbaar is
if not has_json_logger and 'json_console' in logging_config['handlers']:
# Vervang json_console handler door een console handler met standaard formatter
logging_config['handlers']['json_console']['formatter'] = 'standard'
# In Kubernetes, conditionally modify specific loggers to use JSON console output
# This preserves the same logger names but changes where/how they log
if in_kubernetes:
for logger_name in logging_config['loggers']:
if logger_name: # Skip the root logger
logging_config['loggers'][logger_name]['handlers'] = ['json_console']
# Controleer of de logs directory schrijfbaar is voordat we de configuratie toepassen
logs_dir = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs')
if os.path.exists(logs_dir) and not os.access(logs_dir, os.W_OK):
print(f"WAARSCHUWING: Logs directory bestaat maar is niet schrijfbaar: {logs_dir}")
print("Logs worden mogelijk niet correct geschreven!")
logging.config.dictConfig(logging_config)
logging.info(f"Logging configured. Environment: {'Kubernetes' if in_kubernetes else 'Development/Testing'}")
logging.info(f"Logs directory: {logs_dir}")
except Exception as e:
print(f"Error configuring logging: {str(e)}")
print("Gedetailleerde foutinformatie:")
import traceback
traceback.print_exc()
# Fall back to basic configuration
logging.basicConfig(level=logging.INFO)
LOGGING = { LOGGING = {
'version': 1, 'version': 1,
@@ -290,7 +285,7 @@ LOGGING = {
'file_app': { 'file_app': {
'level': 'DEBUG', 'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/eveai_app.log', 'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_app.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB 'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2, 'backupCount': 2,
'formatter': 'standard', 'formatter': 'standard',
@@ -298,7 +293,7 @@ LOGGING = {
'file_workers': { 'file_workers': {
'level': 'DEBUG', 'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/eveai_workers.log', 'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_workers.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB 'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2, 'backupCount': 2,
'formatter': 'standard', 'formatter': 'standard',
@@ -306,7 +301,7 @@ LOGGING = {
'file_chat_client': { 'file_chat_client': {
'level': 'DEBUG', 'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/eveai_chat_client.log', 'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_chat_client.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB 'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2, 'backupCount': 2,
'formatter': 'standard', 'formatter': 'standard',
@@ -314,7 +309,7 @@ LOGGING = {
'file_chat_workers': { 'file_chat_workers': {
'level': 'DEBUG', 'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/eveai_chat_workers.log', 'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_chat_workers.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB 'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2, 'backupCount': 2,
'formatter': 'standard', 'formatter': 'standard',
@@ -322,7 +317,7 @@ LOGGING = {
'file_api': { 'file_api': {
'level': 'DEBUG', 'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/eveai_api.log', 'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_api.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB 'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2, 'backupCount': 2,
'formatter': 'standard', 'formatter': 'standard',
@@ -330,7 +325,7 @@ LOGGING = {
'file_beat': { 'file_beat': {
'level': 'DEBUG', 'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/eveai_beat.log', 'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_beat.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB 'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2, 'backupCount': 2,
'formatter': 'standard', 'formatter': 'standard',
@@ -338,7 +333,7 @@ LOGGING = {
'file_entitlements': { 'file_entitlements': {
'level': 'DEBUG', 'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/eveai_entitlements.log', 'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_entitlements.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB 'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2, 'backupCount': 2,
'formatter': 'standard', 'formatter': 'standard',
@@ -346,7 +341,7 @@ LOGGING = {
'file_sqlalchemy': { 'file_sqlalchemy': {
'level': 'DEBUG', 'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/sqlalchemy.log', 'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'sqlalchemy.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB 'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2, 'backupCount': 2,
'formatter': 'standard', 'formatter': 'standard',
@@ -354,7 +349,7 @@ LOGGING = {
'file_security': { 'file_security': {
'level': 'DEBUG', 'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/security.log', 'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'security.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB 'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2, 'backupCount': 2,
'formatter': 'standard', 'formatter': 'standard',
@@ -362,7 +357,7 @@ LOGGING = {
'file_rag_tuning': { 'file_rag_tuning': {
'level': 'DEBUG', 'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/rag_tuning.log', 'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'rag_tuning.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB 'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2, 'backupCount': 2,
'formatter': 'standard', 'formatter': 'standard',
@@ -370,7 +365,7 @@ LOGGING = {
'file_embed_tuning': { 'file_embed_tuning': {
'level': 'DEBUG', 'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/embed_tuning.log', 'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'embed_tuning.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB 'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2, 'backupCount': 2,
'formatter': 'standard', 'formatter': 'standard',
@@ -378,7 +373,7 @@ LOGGING = {
'file_business_events': { 'file_business_events': {
'level': 'INFO', 'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/business_events.log', 'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'business_events.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB 'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2, 'backupCount': 2,
'formatter': 'standard', 'formatter': 'standard',
@@ -388,98 +383,102 @@ LOGGING = {
'level': 'DEBUG', 'level': 'DEBUG',
'formatter': 'standard', 'formatter': 'standard',
}, },
'json_console': {
'class': 'logging.StreamHandler',
'level': 'INFO',
'formatter': 'json',
'stream': 'ext://sys.stdout',
},
'tuning_file': { 'tuning_file': {
'level': 'DEBUG', 'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/tuning.log', 'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'tuning.log'),
'maxBytes': 1024 * 1024 * 3, # 3MB 'maxBytes': 1024 * 1024 * 3, # 3MB
'backupCount': 3, 'backupCount': 3,
'formatter': 'tuning', 'formatter': 'tuning',
}, },
'graylog': {
'level': 'DEBUG',
'class': 'graypy.GELFUDPHandler',
'host': GRAYLOG_HOST,
'port': GRAYLOG_PORT,
'debugging_fields': True,
'formatter': 'graylog'
},
}, },
'formatters': { 'formatters': {
'standard': { 'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s (%(component)s) [%(module)s:%(lineno)d]: %(message)s', 'format': '%(asctime)s [%(levelname)s] %(name)s (%(component)s) [%(module)s:%(lineno)d]: %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S' 'datefmt': '%Y-%m-%d %H:%M:%S'
}, },
'graylog': {
'format': '[%(levelname)s] %(name)s (%(component)s) [%(module)s:%(lineno)d in %(funcName)s] '
'[Thread: %(threadName)s]: %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
'()': GraylogFormatter
},
'tuning': { 'tuning': {
'()': TuningFormatter, '()': TuningFormatter,
'datefmt': '%Y-%m-%d %H:%M:%S UTC' 'datefmt': '%Y-%m-%d %H:%M:%S UTC'
},
'json': {
'format': '%(message)s',
'class': 'logging.Formatter' if not 'pythonjsonlogger' in sys.modules else 'pythonjsonlogger.jsonlogger.JsonFormatter',
'json_default': lambda obj: str(obj) if isinstance(obj, (dt, Exception)) else None,
'json_ensure_ascii': False,
'rename_fields': {
'asctime': 'timestamp',
'levelname': 'severity'
},
'timestamp': True,
'datefmt': '%Y-%m-%dT%H:%M:%S.%fZ'
} }
}, },
'loggers': { 'loggers': {
'eveai_app': { # logger for the eveai_app 'eveai_app': { # logger for the eveai_app
'handlers': ['file_app', 'graylog', ] if env == 'production' else ['file_app', ], 'handlers': ['file_app'],
'level': 'DEBUG', 'level': 'DEBUG',
'propagate': False 'propagate': False
}, },
'eveai_workers': { # logger for the eveai_workers 'eveai_workers': { # logger for the eveai_workers
'handlers': ['file_workers', 'graylog', ] if env == 'production' else ['file_workers', ], 'handlers': ['file_workers'],
'level': 'DEBUG', 'level': 'DEBUG',
'propagate': False 'propagate': False
}, },
'eveai_chat_client': { # logger for the eveai_chat 'eveai_chat_client': { # logger for the eveai_chat
'handlers': ['file_chat_client', 'graylog', ] if env == 'production' else ['file_chat_client', ], 'handlers': ['file_chat_client'],
'level': 'DEBUG', 'level': 'DEBUG',
'propagate': False 'propagate': False
}, },
'eveai_chat_workers': { # logger for the eveai_chat_workers 'eveai_chat_workers': { # logger for the eveai_chat_workers
'handlers': ['file_chat_workers', 'graylog', ] if env == 'production' else ['file_chat_workers', ], 'handlers': ['file_chat_workers'],
'level': 'DEBUG', 'level': 'DEBUG',
'propagate': False 'propagate': False
}, },
'eveai_api': { # logger for the eveai_chat_workers 'eveai_api': { # logger for the eveai_api
'handlers': ['file_api', 'graylog', ] if env == 'production' else ['file_api', ], 'handlers': ['file_api'],
'level': 'DEBUG', 'level': 'DEBUG',
'propagate': False 'propagate': False
}, },
'eveai_beat': { # logger for the eveai_beat 'eveai_beat': { # logger for the eveai_beat
'handlers': ['file_beat', 'graylog', ] if env == 'production' else ['file_beat', ], 'handlers': ['file_beat'],
'level': 'DEBUG', 'level': 'DEBUG',
'propagate': False 'propagate': False
}, },
'eveai_entitlements': { # logger for the eveai_entitlements 'eveai_entitlements': { # logger for the eveai_entitlements
'handlers': ['file_entitlements', 'graylog', ] if env == 'production' else ['file_entitlements', ], 'handlers': ['file_entitlements'],
'level': 'DEBUG', 'level': 'DEBUG',
'propagate': False 'propagate': False
}, },
'sqlalchemy.engine': { # logger for the sqlalchemy 'sqlalchemy.engine': { # logger for the sqlalchemy
'handlers': ['file_sqlalchemy', 'graylog', ] if env == 'production' else ['file_sqlalchemy', ], 'handlers': ['file_sqlalchemy'],
'level': 'DEBUG', 'level': 'DEBUG',
'propagate': False 'propagate': False
}, },
'security': { # logger for the security 'security': { # logger for the security
'handlers': ['file_security', 'graylog', ] if env == 'production' else ['file_security', ], 'handlers': ['file_security'],
'level': 'DEBUG', 'level': 'DEBUG',
'propagate': False 'propagate': False
}, },
'business_events': { 'business_events': {
'handlers': ['file_business_events', 'graylog'], 'handlers': ['file_business_events'],
'level': 'DEBUG', 'level': 'DEBUG',
'propagate': False 'propagate': False
}, },
# Single tuning logger # Single tuning logger
'tuning': { 'tuning': {
'handlers': ['tuning_file', 'graylog'] if env == 'production' else ['tuning_file'], 'handlers': ['tuning_file'],
'level': 'DEBUG', 'level': 'DEBUG',
'propagate': False, 'propagate': False,
}, },
'': { # root logger '': { # root logger
'handlers': ['console'], 'handlers': ['console'] if os.environ.get('KUBERNETES_SERVICE_HOST') is None else ['json_console'],
'level': 'WARNING', # Set higher level for root to minimize noise 'level': 'WARNING', # Set higher level for root to minimize noise
'propagate': False 'propagate': False
}, },

View File

@@ -1,4 +1,4 @@
version: "1.1.0" version: "1.3.0"
name: "Traicie Selection Specialist" name: "Traicie Selection Specialist"
framework: "crewai" framework: "crewai"
partner: "traicie" partner: "traicie"
@@ -108,13 +108,13 @@ results:
description: "List of vacancy competencies and their descriptions" description: "List of vacancy competencies and their descriptions"
required: false required: false
agents: agents:
- type: "TRAICIE_HR_BP_AGENT" - type: "TRAICIE_RECRUITER"
version: "1.0" version: "1.0"
tasks: tasks:
- type: "TRAICIE_GET_COMPETENCIES_TASK" - type: "TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION"
version: "1.1" version: "1.0"
metadata: metadata:
author: "Josako" author: "Josako"
date_added: "2025-05-27" date_added: "2025-06-16"
changes: "Add make to the selection specialist" changes: "Realising the actual interaction with the LLM"
description: "Assistant to create a new Vacancy based on Vacancy Text" description: "Assistant to create a new Vacancy based on Vacancy Text"

View File

@@ -0,0 +1,120 @@
version: "1.3.0"
name: "Traicie Selection Specialist"
framework: "crewai"
partner: "traicie"
chat: false
configuration:
name:
name: "Name"
description: "The name the specialist is called upon."
type: "str"
required: true
role_reference:
name: "Role Reference"
description: "A customer reference to the role"
type: "str"
required: false
make:
name: "Make"
description: "The make for which the role is defined and the selection specialist is created"
type: "system"
system_name: "tenant_make"
required: true
competencies:
name: "Competencies"
description: "An ordered list of competencies."
type: "ordered_list"
list_type: "competency_details"
required: true
tone_of_voice:
name: "Tone of Voice"
description: "The tone of voice the specialist uses to communicate"
type: "enum"
allowed_values: ["Professional & Neutral", "Warm & Empathetic", "Energetic & Enthusiastic", "Accessible & Informal", "Expert & Trustworthy", "No-nonsense & Goal-driven"]
default: "Professional & Neutral"
required: true
language_level:
name: "Language Level"
description: "Language level to be used when communicating, relating to CEFR levels"
type: "enum"
allowed_values: ["Basic", "Standard", "Professional"]
default: "Standard"
required: true
welcome_message:
name: "Welcome Message"
description: "Introductory text given by the specialist - but translated according to Tone of Voice, Language Level and Starting Language"
type: "text"
required: false
closing_message:
name: "Closing Message"
description: "Closing message given by the specialist - but translated according to Tone of Voice, Language Level and Starting Language"
type: "text"
required: false
competency_details:
title:
name: "Title"
description: "Competency Title"
type: "str"
required: true
description:
name: "Description"
description: "Description (in context of the role) of the competency"
type: "text"
required: true
is_knockout:
name: "KO"
description: "Defines if the competency is a knock-out criterium"
type: "boolean"
required: true
default: false
assess:
name: "Assess"
description: "Indication if this competency is to be assessed"
type: "boolean"
required: true
default: true
arguments:
region:
name: "Region"
type: "str"
description: "The region of the specific vacancy"
required: false
working_schedule:
name: "Work Schedule"
type: "str"
description: "The work schedule or employment type of the specific vacancy"
required: false
start_date:
name: "Start Date"
type: "date"
description: "The start date of the specific vacancy"
required: false
language:
name: "Language"
type: "str"
description: "The language (2-letter code) used to start the conversation"
required: true
interaction_mode:
name: "Interaction Mode"
type: "enum"
description: "The interaction mode the specialist will start working in."
allowed_values: ["Job Application", "Seduction"]
default: "Job Application"
required: true
results:
competencies:
name: "competencies"
type: "List[str, str]"
description: "List of vacancy competencies and their descriptions"
required: false
agents:
- type: "TRAICIE_RECRUITER_AGENT"
version: "1.0"
tasks:
- type: "TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK"
version: "1.0"
metadata:
author: "Josako"
date_added: "2025-06-18"
changes: "Add make to the selection specialist"
description: "Assistant to create a new Vacancy based on Vacancy Text"

View File

@@ -5,17 +5,24 @@ task_description: >
(both description and title). The criteria are in between triple backquotes.You need to prepare for the interviews, (both description and title). The criteria are in between triple backquotes.You need to prepare for the interviews,
and are to provide for each of these ko criteria: and are to provide for each of these ko criteria:
- A question to ask the recruitment candidate describing the context of the ko criterium. Use your experience to not - A short question to ask the recruitment candidate describing the context of the ko criterium. Use your experience to
just ask a closed question, but a question from which you can indirectly derive a positive or negative qualification ask a question that enables us to verify compliancy to the criterium.
of the criterium based on the answer of the candidate. - A set of 2 short answers to that question, from the candidates perspective. One of the answers will result in a
- A set of max 5 answers on that question, from the candidates perspective. One of the answers will result in a positive evaluation of the criterium, the other one in a negative evaluation. Mark each of the answers as positive
positive evaluation of the criterium, the other ones in a negative evaluation. Mark each of the answers as positive
or negative. or negative.
Describe the answers from the perspective of the candidate. Be sure to include all necessary aspects in your answers. Describe the answers from the perspective of the candidate. Be sure to include all necessary aspects in your answers.
Apply the following tone of voice in both questions and answers: {tone_of_voice} Apply the following tone of voice in both questions and answers: {tone_of_voice}
Use the following description to understand tone of voice:
{tone_of_voice_context}
Apply the following language level in both questions and answers: {language_level} Apply the following language level in both questions and answers: {language_level}
Use {language} as language for both questions and answers. Use {language} as language for both questions and answers.
Use the following description to understand language_level:
{language_level_context}
```{ko_criteria}``` ```{ko_criteria}```
@@ -25,7 +32,8 @@ expected_output: >
For each of the ko criteria, you provide: For each of the ko criteria, you provide:
- the exact title as specified in the original language - the exact title as specified in the original language
- the question in {language} - the question in {language}
- a set of answers, with for each answer an indication if it is the correct answer, or a false response. In {language}. - a positive answer, resulting in a positive evaluation of the criterium. In {language}.
- a negative answer, resulting in a negative evaluation of the criterium. In {language}.
{custom_expected_output} {custom_expected_output}
metadata: metadata:
author: "Josako" author: "Josako"

View File

@@ -32,5 +32,10 @@ AGENT_TYPES = {
"name": "Traicie HR BP Agent", "name": "Traicie HR BP Agent",
"description": "An HR Business Partner Agent", "description": "An HR Business Partner Agent",
"partner": "traicie" "partner": "traicie"
} },
"TRAICIE_RECRUITER_AGENT": {
"name": "Traicie Recruiter Agent",
"description": "An Senior Recruiter Agent",
"partner": "traicie"
},
} }

View File

@@ -41,5 +41,10 @@ TASK_TYPES = {
"name": "Traicie Get KO Criteria", "name": "Traicie Get KO Criteria",
"description": "A Task to get KO Criteria from a Vacancy Text", "description": "A Task to get KO Criteria from a Vacancy Text",
"partner": "traicie" "partner": "traicie"
},
"TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK": {
"name": "Traicie KO Criteria Interview Definition",
"description": "A Task to define KO Criteria questions to be used during the interview",
"partner": "traicie"
} }
} }

View File

@@ -8,7 +8,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [2.3.6-alfa] ## [2.3.6-alfa]
### Added ### Added
- Full Chat Client functionaltiy, including Forms, ESS, theming - Full Chat Client functionality, including Forms, ESS, theming
- First Demo version of Traicie Selection Specialist - First Demo version of Traicie Selection Specialist
## [2.3.5-alfa] ## [2.3.5-alfa]

View File

@@ -12,7 +12,7 @@ import logging.config
from common.models.user import TenantDomain from common.models.user import TenantDomain
from common.utils.cors_utils import get_allowed_origins from common.utils.cors_utils import get_allowed_origins
from common.utils.database import Database from common.utils.database import Database
from config.logging_config import LOGGING from config.logging_config import configure_logging
from .api.document_api import document_ns from .api.document_api import document_ns
from .api.auth import auth_ns from .api.auth import auth_ns
from .api.specialist_execution_api import specialist_execution_ns from .api.specialist_execution_api import specialist_execution_ns
@@ -40,7 +40,7 @@ def create_app(config_file=None):
app.celery = make_celery(app.name, app.config) app.celery = make_celery(app.name, app.config)
init_celery(app.celery, app) init_celery(app.celery, app)
logging.config.dictConfig(LOGGING) configure_logging()
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
logger.info("eveai_api starting up") logger.info("eveai_api starting up")

View File

@@ -13,7 +13,7 @@ import common.models.interaction
import common.models.entitlements import common.models.entitlements
import common.models.document import common.models.document
from common.utils.startup_eveai import perform_startup_actions from common.utils.startup_eveai import perform_startup_actions
from config.logging_config import LOGGING from config.logging_config import configure_logging
from common.utils.security import set_tenant_session_data from common.utils.security import set_tenant_session_data
from common.utils.errors import register_error_handlers from common.utils.errors import register_error_handlers
from common.utils.celery_utils import make_celery, init_celery from common.utils.celery_utils import make_celery, init_celery
@@ -47,8 +47,16 @@ def create_app(config_file=None):
except OSError: except OSError:
pass pass
logging.config.dictConfig(LOGGING) # Configureer logging op basis van de omgeving (K8s of traditioneel)
logger = logging.getLogger(__name__) try:
configure_logging()
logger = logging.getLogger(__name__)
# Test dat logging werkt
logger.debug("Logging test in eveai_app")
except Exception as e:
print(f"Critical Error Initialising Error: {str(e)}")
import traceback
traceback.print_exc()
logger.info("eveai_app starting up") logger.info("eveai_app starting up")
@@ -92,6 +100,45 @@ def create_app(config_file=None):
# app.logger.debug(f"Before request - Session data: {session}") # app.logger.debug(f"Before request - Session data: {session}")
# app.logger.debug(f"Before request - Request headers: {request.headers}") # app.logger.debug(f"Before request - Request headers: {request.headers}")
@app.before_request
def before_request():
from flask import session, request
from flask_login import current_user
import datetime
app.logger.debug(f"Before request - URL: {request.url}")
app.logger.debug(f"Before request - Session permanent: {session.permanent}")
# Log session expiry tijd als deze bestaat
if current_user.is_authenticated:
# Controleer of sessie permanent is (nodig voor PERMANENT_SESSION_LIFETIME)
if not session.permanent:
session.permanent = True
app.logger.debug("Session marked as permanent (enables 60min timeout)")
# Log wanneer sessie zou verlopen
if '_permanent' in session:
expires_at = datetime.datetime.now() + app.permanent_session_lifetime
app.logger.debug(f"Session will expire at: {expires_at} (60 min from now)")
@app.route('/debug/session')
def debug_session():
from flask import session
from flask_security import current_user
import datetime
if current_user.is_authenticated:
info = {
'session_permanent': session.permanent,
'session_lifetime_minutes': app.permanent_session_lifetime.total_seconds() / 60,
'session_refresh_enabled': app.config.get('SESSION_REFRESH_EACH_REQUEST'),
'current_time': datetime.datetime.now().isoformat(),
'session_data_keys': list(session.keys())
}
return jsonify(info)
else:
return jsonify({'error': 'Not authenticated'})
# Register template filters # Register template filters
register_filters(app) register_filters(app)

View File

@@ -259,6 +259,10 @@ def view_usages():
page = request.args.get('page', 1, type=int) page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 10, type=int) per_page = request.args.get('per_page', 10, type=int)
if not session.get('tenant', None):
flash('You can only view usage for a Tenant. Select a Tenant to continue!', 'danger')
return redirect(prefixed_url_for('user_bp.select_tenant'))
tenant_id = session.get('tenant').get('id') tenant_id = session.get('tenant').get('id')
query = LicenseUsage.query.filter_by(tenant_id=tenant_id).order_by(desc(LicenseUsage.id)) query = LicenseUsage.query.filter_by(tenant_id=tenant_id).order_by(desc(LicenseUsage.id))

View File

@@ -4,7 +4,7 @@ from flask import Flask
import os import os
from common.utils.celery_utils import make_celery, init_celery from common.utils.celery_utils import make_celery, init_celery
from config.logging_config import LOGGING from config.logging_config import configure_logging
from config.config import get_config from config.config import get_config
@@ -21,7 +21,7 @@ def create_app(config_file=None):
case _: case _:
app.config.from_object(get_config('dev')) app.config.from_object(get_config('dev'))
logging.config.dictConfig(LOGGING) configure_logging()
register_extensions(app) register_extensions(app)

View File

@@ -9,7 +9,7 @@ from common.extensions import (db, bootstrap, cors, csrf, session,
minio_client, simple_encryption, metrics, cache_manager, content_manager) minio_client, simple_encryption, metrics, cache_manager, content_manager)
from common.models.user import Tenant, SpecialistMagicLinkTenant from common.models.user import Tenant, SpecialistMagicLinkTenant
from common.utils.startup_eveai import perform_startup_actions from common.utils.startup_eveai import perform_startup_actions
from config.logging_config import LOGGING from config.logging_config import configure_logging
from eveai_chat_client.utils.errors import register_error_handlers from eveai_chat_client.utils.errors import register_error_handlers
from common.utils.celery_utils import make_celery, init_celery from common.utils.celery_utils import make_celery, init_celery
from common.utils.template_filters import register_filters from common.utils.template_filters import register_filters
@@ -39,7 +39,7 @@ def create_app(config_file=None):
except OSError: except OSError:
pass pass
logging.config.dictConfig(LOGGING) configure_logging()
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
logger.info("eveai_chat_client starting up") logger.info("eveai_chat_client starting up")

View File

@@ -5,7 +5,7 @@ import os
from common.utils.celery_utils import make_celery, init_celery from common.utils.celery_utils import make_celery, init_celery
from common.extensions import db, cache_manager from common.extensions import db, cache_manager
from config.logging_config import LOGGING from config.logging_config import configure_logging
from config.config import get_config from config.config import get_config
@@ -22,7 +22,7 @@ def create_app(config_file=None):
case _: case _:
app.config.from_object(get_config('dev')) app.config.from_object(get_config('dev'))
logging.config.dictConfig(LOGGING) configure_logging()
app.logger.info('Starting up eveai_chat_workers...') app.logger.info('Starting up eveai_chat_workers...')
register_extensions(app) register_extensions(app)

View File

@@ -0,0 +1,20 @@
# Language-level definitions used to steer the wording of LLM-generated
# interview questions and answers. Each entry carries a human-readable name,
# a description, the matching CEFR range, and the audience it suits.
# The "name" values are referenced from a specialist's configuration key
# "language_level"; the full list is serialized into the LLM prompt as context
# (see the selection specialist's language_level_context input).
LANGUAGE_LEVEL = [
    {
        # Simplest register: short sentences, minimal jargon.
        "name": "Basic",
        "description": "Short, simple sentences. Minimal jargon. Lots of visual and concrete language.",
        "cefr_level": "A2 - B1",
        "ideal_audience": "Manual laborers, entry-level roles, newcomers with another native language"
    },
    {
        # Default level when a specialist configures nothing else.
        "name": "Standard",
        "description": "Clear spoken language. Well-formulated without difficult words.",
        "cefr_level": "B2",
        "ideal_audience": "Retail, administration, logistics, early-career professionals"
    },
    {
        # Most advanced register: business language, technical terms allowed.
        "name": "Professional",
        "description": "Business language with technical terms where needed. More complex sentence structures.",
        "cefr_level": "C1",
        "ideal_audience": "Management, HR, technical profiles"
    }
]

View File

@@ -0,0 +1,32 @@
# Tone-of-voice definitions used to steer the style of LLM-generated interview
# questions and answers. Each entry carries a human-readable name, a short
# description, and guidance on when to apply it. The "name" values are
# referenced from a specialist's configuration key "tone_of_voice"; the full
# list is serialized into the LLM prompt as context (see the selection
# specialist's tone_of_voice_context input).
TONE_OF_VOICE = [
    {
        # Default tone when a specialist configures nothing else.
        "name": "Professional & Neutral",
        "description": "Business-like, clear, to the point. Focused on facts.",
        "when_to_use": "Corporate jobs, legal roles, formal sectors"
    },
    {
        "name": "Warm & Empathetic",
        "description": "Human, compassionate, reassuring.",
        "when_to_use": "Healthcare, education, HR, social professions"
    },
    {
        "name": "Energetic & Enthusiastic",
        "description": "Upbeat, persuasive, motivating.",
        "when_to_use": "Sales, marketing, hospitality, start-ups"
    },
    {
        "name": "Accessible & Informal",
        "description": "Casual, approachable, friendly, and human.",
        "when_to_use": "Youth-focused, entry-level, retail, creative sectors"
    },
    {
        "name": "Expert & Trustworthy",
        "description": "Calm authority, advisory tone, knowledgeable.",
        "when_to_use": "IT, engineering, consultancy, medical profiles"
    },
    {
        "name": "No-nonsense & Goal-driven",
        "description": "Direct, efficient, pragmatic.",
        "when_to_use": "Technical, logistics, blue-collar jobs, production environments"
    }
]

View File

@@ -0,0 +1,15 @@
from typing import List, Optional
from pydantic import BaseModel, Field
from eveai_chat_workers.outputs.globals.basic_types.list_item import ListItem
class KOQuestion(BaseModel):
    """Structured LLM output for one knockout criterium: the interview question
    plus one answer that passes and one that fails the criterium."""
    title: str = Field(..., description="The title of the knockout criterium.")
    question: str = Field(..., description="The corresponding question asked to the candidate.")
    answer_positive: Optional[str] = Field(None, description="The answer to the question, resulting in a positive outcome.")
    answer_negative: Optional[str] = Field(None, description="The answer to the question, resulting in a negative outcome.")
class KOQuestions(BaseModel):
    """Container for all KO questions produced by the interview-definition task;
    registered as that task's pydantic output schema."""
    ko_questions: List[KOQuestion] = Field(
        default_factory=list,
        description="KO Questions and answers."
    )

View File

@@ -4,7 +4,8 @@ from typing import Dict, Any, List
from flask import current_app from flask import current_app
from common.extensions import cache_manager from common.extensions import cache_manager
from common.models.interaction import SpecialistRetriever from common.models.interaction import SpecialistRetriever, Specialist
from common.models.user import Tenant
from common.utils.execution_progress import ExecutionProgressTracker from common.utils.execution_progress import ExecutionProgressTracker
from config.logging_config import TuningLogger from config.logging_config import TuningLogger
from eveai_chat_workers.retrievers.base import BaseRetriever from eveai_chat_workers.retrievers.base import BaseRetriever
@@ -17,7 +18,9 @@ class BaseSpecialistExecutor(ABC):
def __init__(self, tenant_id: int, specialist_id: int, session_id: str, task_id: str): def __init__(self, tenant_id: int, specialist_id: int, session_id: str, task_id: str):
self.tenant_id = tenant_id self.tenant_id = tenant_id
self.tenant = Tenant.query.get_or_404(tenant_id)
self.specialist_id = specialist_id self.specialist_id = specialist_id
self.specialist = Specialist.query.get_or_404(specialist_id)
self.session_id = session_id self.session_id = session_id
self.task_id = task_id self.task_id = task_id
self.tuning = False self.tuning = False
@@ -96,6 +99,37 @@ class BaseSpecialistExecutor(ABC):
def update_progress(self, processing_type, data) -> None: def update_progress(self, processing_type, data) -> None:
self.ept.send_update(self.task_id, processing_type, data) self.ept.send_update(self.task_id, processing_type, data)
def _replace_system_variables(self, text: str) -> str:
"""
Replace all system variables in the text with their corresponding values.
System variables are in the format 'tenant_<attribute_name>'
Args:
text: The text containing system variables to replace
Returns:
str: The text with all system variables replaced
"""
if not text:
return text
from common.utils.model_utils import replace_variable_in_template
# Find all tenant_* variables and replace them with tenant attribute values
# Format of variables: tenant_name, tenant_code, etc.
result = text
# Get all attributes of the tenant object
tenant_attrs = vars(self.tenant)
# Replace all tenant_* variables
for attr_name, attr_value in tenant_attrs.items():
variable = f"tenant_{attr_name}"
if variable in result:
result = replace_variable_in_template(result, variable, str(attr_value))
return result
@abstractmethod @abstractmethod
def execute_specialist(self, arguments: SpecialistArguments) -> SpecialistResult: def execute_specialist(self, arguments: SpecialistArguments) -> SpecialistResult:
"""Execute the specialist's logic""" """Execute the specialist's logic"""

View File

@@ -33,10 +33,6 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
def __init__(self, tenant_id: int, specialist_id: int, session_id: str, task_id): def __init__(self, tenant_id: int, specialist_id: int, session_id: str, task_id):
super().__init__(tenant_id, specialist_id, session_id, task_id) super().__init__(tenant_id, specialist_id, session_id, task_id)
# Check and load the specialist
self.specialist = Specialist.query.get_or_404(specialist_id)
# Set the specific configuration for the SPIN Specialist
# self.specialist_configuration = json.loads(self.specialist.configuration)
self.tuning = self.specialist.tuning self.tuning = self.specialist.tuning
# Initialize retrievers # Initialize retrievers
self.retrievers = self._initialize_retrievers() self.retrievers = self._initialize_retrievers()
@@ -127,7 +123,9 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
for agent in self.specialist.agents: for agent in self.specialist.agents:
agent_config = cache_manager.agents_config_cache.get_config(agent.type, agent.type_version) agent_config = cache_manager.agents_config_cache.get_config(agent.type, agent.type_version)
agent_role = agent_config.get('role', '').replace('{custom_role}', agent.role or '') agent_role = agent_config.get('role', '').replace('{custom_role}', agent.role or '')
agent_role = self._replace_system_variables(agent_role)
agent_goal = agent_config.get('goal', '').replace('{custom_goal}', agent.goal or '') agent_goal = agent_config.get('goal', '').replace('{custom_goal}', agent.goal or '')
agent_goal = self._replace_system_variables(agent_goal)
agent_backstory = agent_config.get('backstory', '').replace('{custom_backstory}', agent.backstory or '') agent_backstory = agent_config.get('backstory', '').replace('{custom_backstory}', agent.backstory or '')
agent_full_model_name = agent_config.get('full_model_name', 'mistral.mistral-large-latest') agent_full_model_name = agent_config.get('full_model_name', 'mistral.mistral-large-latest')
agent_temperature = agent_config.get('temperature', 0.3) agent_temperature = agent_config.get('temperature', 0.3)
@@ -152,6 +150,7 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
task_config = cache_manager.tasks_config_cache.get_config(task.type, task.type_version) task_config = cache_manager.tasks_config_cache.get_config(task.type, task.type_version)
task_description = (task_config.get('task_description', '') task_description = (task_config.get('task_description', '')
.replace('{custom_description}', task.task_description or '')) .replace('{custom_description}', task.task_description or ''))
task_description = self._replace_system_variables(task_description)
task_expected_output = (task_config.get('expected_output', '') task_expected_output = (task_config.get('expected_output', '')
.replace('{custom_expected_output}', task.expected_output or '')) .replace('{custom_expected_output}', task.expected_output or ''))
# dynamically build the arguments # dynamically build the arguments
@@ -161,9 +160,12 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
"verbose": task.tuning "verbose": task.tuning
} }
task_name = task.type.lower() task_name = task.type.lower()
current_app.logger.debug(f"Task {task_name} is getting processed")
if task_name in self._task_pydantic_outputs: if task_name in self._task_pydantic_outputs:
task_kwargs["output_pydantic"] = self._task_pydantic_outputs[task_name] task_kwargs["output_pydantic"] = self._task_pydantic_outputs[task_name]
current_app.logger.debug(f"Task {task_name} has an output pydantic: {self._task_pydantic_outputs[task_name]}")
if task_name in self._task_agents: if task_name in self._task_agents:
current_app.logger.debug(f"Task {task_name} has an agent: {self._task_agents[task_name]}")
task_kwargs["agent"] = self._agents[self._task_agents[task_name]] task_kwargs["agent"] = self._agents[self._task_agents[task_name]]
# Instantiate the task with dynamic arguments # Instantiate the task with dynamic arguments

View File

@@ -18,6 +18,9 @@ from eveai_chat_workers.outputs.traicie.competencies.competencies_v1_1 import Co
from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew, EveAICrewAIFlow, EveAIFlowState from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew, EveAICrewAIFlow, EveAIFlowState
from common.services.interaction.specialist_services import SpecialistServices from common.services.interaction.specialist_services import SpecialistServices
NEW_SPECIALIST_TYPE = "TRAICIE_SELECTION_SPECIALIST"
NEW_SPECIALIST_TYPE_VERSION = "1.3"
class SpecialistExecutor(CrewAIBaseSpecialistExecutor): class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
""" """
@@ -117,8 +120,8 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
new_specialist = Specialist( new_specialist = Specialist(
name=name, name=name,
description=f"Specialist for {arguments.role_name} role", description=f"Specialist for {arguments.role_name} role",
type="TRAICIE_SELECTION_SPECIALIST", type=NEW_SPECIALIST_TYPE,
type_version="1.1", type_version=NEW_SPECIALIST_TYPE_VERSION,
tuning=False, tuning=False,
configuration=selection_config, configuration=selection_config,
) )
@@ -130,7 +133,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
current_app.logger.error(f"Error creating selection specialist: {str(e)}") current_app.logger.error(f"Error creating selection specialist: {str(e)}")
raise e raise e
SpecialistServices.initialize_specialist(new_specialist.id, "TRAICIE_SELECTION_SPECIALIST", "1.0") SpecialistServices.initialize_specialist(new_specialist.id, NEW_SPECIALIST_TYPE, NEW_SPECIALIST_TYPE_VERSION)

View File

@@ -0,0 +1,273 @@
import asyncio
import json
from os import wait
from typing import Optional, List, Dict, Any
from datetime import date
from time import sleep
from crewai.flow.flow import start, listen, and_
from flask import current_app
from pydantic import BaseModel, Field, EmailStr
from sqlalchemy.exc import SQLAlchemyError
from common.extensions import db
from common.models.user import Tenant
from common.models.interaction import Specialist
from eveai_chat_workers.outputs.globals.basic_types.list_item import ListItem
from eveai_chat_workers.outputs.traicie.knockout_questions.knockout_questions_v1_0 import KOQuestions, KOQuestion
from eveai_chat_workers.specialists.crewai_base_specialist import CrewAIBaseSpecialistExecutor
from eveai_chat_workers.specialists.specialist_typing import SpecialistResult, SpecialistArguments
from eveai_chat_workers.outputs.traicie.competencies.competencies_v1_1 import Competencies
from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew, EveAICrewAIFlow, EveAIFlowState
from common.services.interaction.specialist_services import SpecialistServices
from common.extensions import cache_manager
from eveai_chat_workers.definitions.language_level.language_level_v1_0 import LANGUAGE_LEVEL
from eveai_chat_workers.definitions.tone_of_voice.tone_of_voice_v1_0 import TONE_OF_VOICE
class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
"""
type: TRAICIE_SELECTION_SPECIALIST
type_version: 1.1
Traicie Selection Specialist Executor class
"""
def __init__(self, tenant_id, specialist_id, session_id, task_id, **kwargs):
self.role_definition_crew = None
super().__init__(tenant_id, specialist_id, session_id, task_id)
# Load the Tenant & set language
self.tenant = Tenant.query.get_or_404(tenant_id)
@property
def type(self) -> str:
return "TRAICIE_SELECTION_SPECIALIST"
@property
def type_version(self) -> str:
return "1.3"
def _config_task_agents(self):
self._add_task_agent("traicie_ko_criteria_interview_definition_task", "traicie_recruiter_agent")
def _config_pydantic_outputs(self):
self._add_pydantic_output("traicie_ko_criteria_interview_definition_task", KOQuestions, "ko_questions")
def _instantiate_specialist(self):
verbose = self.tuning
ko_def_agents = [self.traicie_recruiter_agent]
ko_def_tasks = [self.traicie_ko_criteria_interview_definition_task]
self.ko_def_crew = EveAICrewAICrew(
self,
"KO Criteria Interview Definition Crew",
agents=ko_def_agents,
tasks=ko_def_tasks,
verbose=verbose,
)
self.flow = SelectionFlow(
self,
self.ko_def_crew
)
def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist execution started", {})
current_app.logger.debug(f"Arguments: {arguments.model_dump()}")
current_app.logger.debug(f"Formatted Context: {formatted_context}")
current_app.logger.debug(f"Formatted History: {self._formatted_history}")
current_app.logger.debug(f"Cached Chat Session: {self._cached_session}")
if not self._cached_session.interactions:
specialist_phase = "initial"
else:
specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')
results = None
match specialist_phase:
case "initial":
results = self.execute_initial_state(arguments, formatted_context, citations)
case "ko_questions":
contact_form = cache_manager.specialist_forms_config_cache.get_config("PERSONAL_CONTACT_FORM", "1.0")
results = SpecialistResult.create_for_type(self.type, self.type_version,
answer=f"We hebben de antwoorden op de KO criteria verwerkt. Je bent een geschikte kandidaat. Kan je je contactegevens doorgeven?",
form_request=contact_form,
phase="personal_contact_data")
case "personal_contact_data":
results = SpecialistResult.create_for_type(self.type, self.type_version,
answer=f"We hebben de contactgegevens verwerkt. We nemen zo snel mogelijk contact met je op.",
phase="candidate_selected")
self.log_tuning(f"Traicie Selection Specialist execution ended", {"Results": results.model_dump() if results else "No info"})
return results
def execute_initial_state(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist initial_state_execution started", {})
knockout_competencies = [
{
"title": c["title"],
"description": c["description"]
}
for c in self.specialist.configuration.get("competencies", [])
if c.get("is_knockout") is True
]
# Convert TONE_OF_VOICE en LANGUAGE_LEVEL lists tp strings usable by the LLM
tone_of_voice_str = "\n\n".join([f"Name: {item['name']}\nDescription: {item['description']}\nWhen to use: {item['when_to_use']}" for item in TONE_OF_VOICE])
language_level_str = "\n\n".join([f"Name: {item['name']}\nDescription: {item['description']}\nCEFR level: {item['cefr_level']}\nIdeal Target Audience: {item['ideal_audience']}" for item in LANGUAGE_LEVEL])
flow_inputs = {
"region": arguments.region,
"working_schedule": arguments.working_schedule,
"start_date": arguments.start_date,
"language": arguments.language,
"interaction_mode": arguments.interaction_mode,
'tone_of_voice': self.specialist.configuration.get('tone_of_voice', 'Professional & Neutral'),
'tone_of_voice_context': tone_of_voice_str,
'language_level': self.specialist.configuration.get('language_level', 'Standard'),
'language_level_context': language_level_str,
'ko_criteria': knockout_competencies,
}
flow_results = self.flow.kickoff(inputs=flow_inputs)
current_app.logger.debug(f"Flow results: {flow_results}")
current_app.logger.debug(f"Flow state: {self.flow.state}")
fields = {}
for ko_question in self.flow.state.ko_criteria_questions:
fields[ko_question.title] = {
"name": ko_question.title,
"description": ko_question.title,
"context": ko_question.question,
"type": "options",
"required": True,
"allowed_values": [ko_question.answer_positive, ko_question.answer_negative]
}
ko_form = {
"type": "KO_CRITERIA_FORM",
"version": "1.0.0",
"name": "Starter Questions",
"icon": "verified",
"fields": fields,
}
results = SpecialistResult.create_for_type(self.type, self.type_version,
answer=f"We starten met een aantal KO Criteria vragen",
form_request=ko_form,
phase="ko_questions")
return results
class SelectionInput(BaseModel):
    """Validated kickoff inputs for the selection flow.

    Populated from the flow_inputs dict built in execute_initial_state();
    pydantic validates by alias, so each alias must match a flow_inputs key.
    """
    region: str = Field(..., alias="region")
    working_schedule: Optional[str] = Field(..., alias="working_schedule")
    # BUG FIX: the alias was a copy-pasted "vacancy_text", so the "start_date"
    # key supplied in flow_inputs was never mapped onto this field and
    # start_date silently stayed None.
    start_date: Optional[date] = Field(None, alias="start_date")
    language: Optional[str] = Field(None, alias="language")
    interaction_mode: Optional[str] = Field(None, alias="interaction_mode")
    tone_of_voice: Optional[str] = Field(None, alias="tone_of_voice")
    tone_of_voice_context: Optional[str] = Field(None, alias="tone_of_voice_context")
    language_level: Optional[str] = Field(None, alias="language_level")
    language_level_context: Optional[str] = Field(None, alias="language_level_context")
    ko_criteria: Optional[List[Dict[str, str]]] = Field(None, alias="ko_criteria")
    question: Optional[str] = Field(None, alias="question")
    field_values: Optional[Dict[str, Any]] = Field(None, alias="field_values")
class SelectionKOCriteriumScore(BaseModel):
    """Evaluation of a candidate's answer to one knockout criterium."""
    criterium: Optional[str] = Field(None, alias="criterium")
    answer: Optional[str] = Field(None, alias="answer")
    # NOTE(review): score range not defined here — presumably a numeric
    # rating; confirm the producing task's contract.
    score: Optional[int] = Field(None, alias="score")
class SelectionCompetencyScore(BaseModel):
    """Evaluation of a candidate's answer to one competency question."""
    competency: Optional[str] = Field(None, alias="competency")
    answer: Optional[str] = Field(None, alias="answer")
    # NOTE(review): score range not defined here — presumably a numeric
    # rating; confirm the producing task's contract.
    score: Optional[int] = Field(None, alias="score")
class PersonalContactData(BaseModel):
    """Candidate contact details collected via the PERSONAL_CONTACT_FORM.

    Field descriptions surface as form labels; aliases match the form's
    field names.
    """
    name: str = Field(..., description="Your name", alias="name")
    # BUG FIX: the description was a copy-pasted "Your Name" from the field above.
    email: EmailStr = Field(..., description="Your email address", alias="email")
    phone: str = Field(..., description="Your Phone Number", alias="phone")
    address: Optional[str] = Field(None, description="Your Address", alias="address")
    # NOTE: "zip" shadows the builtin, but renaming would break the form
    # alias/field contract, so it is kept.
    zip: Optional[str] = Field(None, description="Postal Code", alias="zip")
    city: Optional[str] = Field(None, description="City", alias="city")
    country: Optional[str] = Field(None, description="Country", alias="country")
    # Explicit GDPR-style consent flag; required, no default.
    consent: bool = Field(..., description="Consent", alias="consent")
class SelectionResult(SpecialistResult):
    """Specialist result extended with selection-specific payloads.

    NOTE(review): ko_criteria_questions is typed List[ListItem] here but
    List[KOQuestion] on SelectionFlowState — confirm which type callers expect.
    """
    ko_criteria_questions: Optional[List[ListItem]] = Field(None, alias="ko_criteria_questions")
    ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores")
    competency_questions: Optional[List[ListItem]] = Field(None, alias="competency_questions")
    competency_scores: Optional[List[SelectionCompetencyScore]] = Field(None, alias="competency_scores")
    personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data")
class SelectionFlowState(EveAIFlowState):
    """Flow state for the Traicie Selection specialist flow; task outputs are
    copied onto it by SelectionFlow.execute_ko_def_definition."""
    # Validated kickoff inputs, set in SelectionFlow.kickoff_async.
    input: Optional[SelectionInput] = None
    # Filled from the interview-definition task's pydantic output.
    ko_criteria_questions: Optional[List[KOQuestion]] = Field(None, alias="ko_criteria_questions")
    ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores")
    competency_questions: Optional[List[ListItem]] = Field(None, alias="competency_questions")
    competency_scores: Optional[List[SelectionCompetencyScore]] = Field(None, alias="competency_scores")
    personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data")
    phase: Optional[str] = Field(None, alias="phase")
    interaction_mode: Optional[str] = Field(None, alias="mode")
class SelectionFlow(EveAICrewAIFlow[SelectionFlowState]):
    """CrewAI flow that runs the KO-criteria interview-definition crew and
    accumulates its outputs on a SelectionFlowState.

    Callers consume the returned flow state, not the raw crew output.
    """

    def __init__(self,
                 specialist_executor: CrewAIBaseSpecialistExecutor,
                 ko_def_crew: EveAICrewAICrew,
                 **kwargs):
        super().__init__(specialist_executor, "Traicie Role Definition Specialist Flow", **kwargs)
        self.specialist_executor = specialist_executor
        self.ko_def_crew = ko_def_crew
        # Set when a crew kickoff raises, so callers can detect a failed run.
        self.exception_raised = False

    @start()
    def process_inputs(self):
        # Entry node only; the real inputs are validated in kickoff_async().
        return ""

    @listen(process_inputs)
    async def execute_ko_def_definition(self):
        """Run the KO-definition crew and copy its task output into the state."""
        inputs = self.state.input.model_dump()
        try:
            current_app.logger.debug("execute_ko_interview_definition")
            crew_output = await self.ko_def_crew.kickoff_async(inputs=inputs)
            # crew_output only contains the output of the LAST task, so the
            # relevant task outputs are copied onto the flow state explicitly.
            for task in self.ko_def_crew.tasks:
                current_app.logger.debug(f"Task {task.name} output:\n{task.output}")
                if task.name == "traicie_ko_criteria_interview_definition_task":
                    self.state.ko_criteria_questions = task.output.pydantic.ko_questions
            self.state.phase = "personal_contact_data"
            current_app.logger.debug(f"State after execute_ko_def_definition: {self.state}")
            current_app.logger.debug(f"State dump after execute_ko_def_definition: {self.state.model_dump()}")
            return crew_output
        except Exception as e:
            current_app.logger.error(f"CREW execute_ko_def Kickoff Error: {str(e)}")
            self.exception_raised = True
            # Bare raise preserves the original traceback.
            raise

    async def kickoff_async(self, inputs=None):
        """Validate inputs, run the flow, and return the final flow state."""
        current_app.logger.debug(f"Async kickoff {self.name}")
        current_app.logger.debug(f"Inputs: {inputs}")
        self.state.input = SelectionInput.model_validate(inputs)
        current_app.logger.debug(f"State: {self.state}")
        # The superclass result is intentionally discarded: the accumulated
        # flow state is the contract with callers.
        await super().kickoff_async(inputs)
        return self.state

View File

@@ -5,7 +5,7 @@ import os
from common.utils.celery_utils import make_celery, init_celery from common.utils.celery_utils import make_celery, init_celery
from common.extensions import db, minio_client, cache_manager from common.extensions import db, minio_client, cache_manager
from config.logging_config import LOGGING from config.logging_config import configure_logging
from config.config import get_config from config.config import get_config
@@ -22,7 +22,7 @@ def create_app(config_file=None):
case _: case _:
app.config.from_object(get_config('dev')) app.config.from_object(get_config('dev'))
logging.config.dictConfig(LOGGING) configure_logging()
register_extensions(app) register_extensions(app)

View File

@@ -5,7 +5,7 @@ import os
from common.utils.celery_utils import make_celery, init_celery from common.utils.celery_utils import make_celery, init_celery
from common.extensions import db, minio_client, cache_manager from common.extensions import db, minio_client, cache_manager
import config.logging_config as logging_config from config.logging_config import configure_logging
from config.config import get_config from config.config import get_config
@@ -22,7 +22,7 @@ def create_app(config_file=None):
case _: case _:
app.config.from_object(get_config('dev')) app.config.from_object(get_config('dev'))
logging.config.dictConfig(logging_config.LOGGING) configure_logging()
register_extensions(app) register_extensions(app)

2
logs/.gitkeep Normal file
View File

@@ -0,0 +1,2 @@
# Deze directory bevat logbestanden
# .gitkeep zorgt ervoor dat de directory wordt meegenomen in Git

2
requirements-k8s.txt Normal file
View File

@@ -0,0 +1,2 @@
# Extra vereisten voor Kubernetes-omgeving
python-json-logger>=2.0.7

View File

@@ -93,3 +93,4 @@ prometheus_client~=0.21.1
scaleway~=2.9.0 scaleway~=2.9.0
html2text~=2025.4.15 html2text~=2025.4.15
markdown~=3.8 markdown~=3.8
python-json-logger~=2.0.7

102
scripts/check_logs.py Normal file
View File

@@ -0,0 +1,102 @@
#!/usr/bin/env python
"""
Dit script controleert of de logs directory bestaat en toegankelijk is,
en test of logging correct werkt.
"""
import os
import sys
import logging
import traceback
def check_logs_directory():
    """Verify the project's logs directory exists, is writable, and accepts a test file.

    Returns True when every check passes, False on the first failure.
    All user-facing messages are printed (in Dutch) as the checks run.
    """
    project_root = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    log_path = os.path.join(project_root, 'logs')
    print(f"\nControleren van logs directory: {log_path}")

    # Ensure the directory exists, creating it when necessary.
    if os.path.exists(log_path):
        print(" - Directory bestaat.")
    else:
        print(" - Directory bestaat niet. Proberen aan te maken...")
        try:
            os.makedirs(log_path, exist_ok=True)
        except Exception as err:
            print(f" - FOUT: Kan directory niet aanmaken: {err}")
            return False
        print(" - Directory succesvol aangemaakt.")

    # Ensure the process has write permission.
    if os.access(log_path, os.W_OK):
        print(" - Directory is schrijfbaar.")
    else:
        print(" - FOUT: Geen schrijfrechten voor de logs directory.")
        return False

    # Prove writability end-to-end with a throwaway probe file.
    probe = os.path.join(log_path, 'test_write.log')
    try:
        with open(probe, 'w') as handle:
            handle.write('Test schrijven naar logs directory.\n')
        print(f" - Succesvol testbestand geschreven naar {probe}")
        os.remove(probe)
        print(" - Testbestand verwijderd.")
    except Exception as err:
        print(f" - FOUT: Kan niet schrijven naar logs directory: {err}")
        return False
    return True
def check_logging_config():
    """Load the central logging configuration and exercise the known loggers.

    Returns True when the configuration loads and every test logger emits a
    message; returns False (after printing the error and a traceback) when
    anything fails, including the import of the config module itself.
    """
    print("\nControleren van logging configuratie...")
    try:
        from config.logging_config import configure_logging
        configure_logging()
        print(" - Logging configuratie geladen.")
        # Emit one INFO record per known service logger as a smoke test.
        for name in ('eveai_app', 'eveai_workers', 'eveai_api', 'tuning'):
            logging.getLogger(name).info(f"Test log bericht van {name}")
            print(f" - Logger '{name}' getest.")
        print(" - Alle loggers succesvol getest.")
        return True
    except Exception as err:
        print(f" - FOUT bij laden van logging configuratie: {err}")
        traceback.print_exc()
        return False
def main():
    """Run both logging checks and report an overall verdict.

    Returns 0 when everything is OK, 1 when any check failed (suitable as a
    process exit status).
    """
    print("\nEveAI Logging Test Utility")
    print("===========================\n")

    dir_ok = check_logs_directory()
    if not dir_ok:
        print("\nPROBLEEM: De logs directory is niet toegankelijk of schrijfbaar.")
        print("Oplossingen:")
        print(" 1. Zorg ervoor dat de gebruiker die de applicatie uitvoert schrijfrechten heeft voor de logs directory.")
        print(" 2. Voer het commando uit: mkdir -p logs && chmod 777 logs")

    cfg_ok = check_logging_config()
    if not cfg_ok:
        print("\nPROBLEEM: De logging configuratie kon niet worden geladen.")
        print("Controleer de config/logging_config.py file.")

    if not (dir_ok and cfg_ok):
        print("\nEr zijn problemen gevonden die opgelost moeten worden.")
        return 1
    print("\nALLES OK: Logging lijkt correct geconfigureerd.")
    print("Controleer de logbestanden in de 'logs' directory voor de testberichten.")
    return 0
if __name__ == "__main__":
    # Run the checks when invoked directly; propagate the verdict as the exit code.
    sys.exit(main())