3 Commits

Author SHA1 Message Date
Josako
043cea45f2 Changelog update for 2.3.7 2025-06-23 11:51:52 +02:00
Josako
7b87880045 - Full Traicie Selection Specialist Flow implemented
- Added Specialist basics for handling phases and automatically transferring data between state and output
- Added QR-code generation for Magic Links
2025-06-23 11:46:56 +02:00
Josako
5b2c04501c - logging improvement and simplification (no more graylog)
- Traicie Selection Specialist Round Trip
- Session improvements + debugging enabled
- Tone of Voice & Language Level definitions introduced
2025-06-20 07:58:06 +02:00
34 changed files with 1172 additions and 174 deletions

README.md.k8s-logging Normal file
View File

@@ -0,0 +1,67 @@
# Kubernetes Logging Upgrade
## Overview
These instructions describe how to update all services to use the new logging configuration, which is compatible with both traditional file-based logging (for development/test) and Kubernetes (for production).
## Steps for each service
Apply the following changes in each of the following services:
- eveai_app
- eveai_workers
- eveai_api
- eveai_chat_client
- eveai_chat_workers
- eveai_beat
- eveai_entitlements
### 1. Update the imports
Change:
```python
from config.logging_config import LOGGING
```
To:
```python
from config.logging_config import configure_logging
```
### 2. Update the logging configuration
Change:
```python
logging.config.dictConfig(LOGGING)
```
To:
```python
configure_logging()
```
## Dockerfile Changes
Add the following lines to the Dockerfile of each service to install the Kubernetes-specific logging dependencies (production only):
```dockerfile
# Production (Kubernetes) builds only
COPY requirements-k8s.txt /app/
RUN if [ "$ENVIRONMENT" = "production" ]; then pip install -r requirements-k8s.txt; fi
```
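Note: `$ENVIRONMENT` is only visible to this `RUN` instruction if it is declared earlier in the Dockerfile (e.g. with `ARG ENVIRONMENT`) and supplied at build time, for instance via `docker build --build-arg ENVIRONMENT=production .`; exactly how the value is passed is an assumption about your build setup.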
## Kubernetes Deployment
Make sure your Kubernetes deployment manifests contain the following environment variable:
```yaml
env:
- name: FLASK_ENV
value: "production"
```
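Note that `configure_logging` detects Kubernetes through the `KUBERNETES_SERVICE_HOST` variable, which Kubernetes injects into every pod automatically; the `FLASK_ENV` variable above is still used by the remaining environment-dependent configuration.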
## Benefits
1. The code automatically detects whether it is running in Kubernetes
2. In development/test environments, everything keeps writing to files
3. In Kubernetes, logs go to stdout/stderr in JSON format
4. No changes needed to existing logger code in the application
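For reference, a minimal sketch of the detection performed by `configure_logging` (see `config/logging_config.py` in this changeset):
```python
import os

# Kubernetes injects KUBERNETES_SERVICE_HOST into every pod,
# so its presence is a reliable in-cluster signal.
in_kubernetes = os.environ.get('KUBERNETES_SERVICE_HOST') is not None
```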

View File

@@ -248,3 +248,14 @@ class EveAIPendingLicensePeriod(EveAIException):
message = f"Basic Fee Payment has not been received yet. Please ensure payment has been made, and please wait for payment to be processed."
super().__init__(message, status_code, payload)
class EveAISpecialistExecutionError(EveAIException):
"""Raised when an error occurs during specialist execution"""
def __init__(self, tenant_id, specialist_id, session_id, details, status_code=400, payload=None):
message = (f"Error during specialist {specialist_id} execution \n"
f"with Session ID {session_id} \n"
f"for Tenant {tenant_id}. \n"
f"Details: {details} \n"
f"The System Administrator has been notified. Please try again later.")
super().__init__(message, status_code, payload)
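A minimal usage sketch for the new exception (the call site below is hypothetical; the import path matches the one used elsewhere in this changeset):
```python
from common.utils.eveai_exceptions import EveAISpecialistExecutionError

def run_specialist(tenant_id: int, specialist_id: int, session_id: str) -> None:
    try:
        ...  # actual specialist execution would go here
    except Exception as exc:
        # Wrap the failure in the domain exception so the registered
        # error handlers can convert it into an HTTP 400 response.
        raise EveAISpecialistExecutionError(tenant_id, specialist_id, session_id, details=str(exc))
```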

View File

@@ -1,5 +1,5 @@
version: "1.0.0"
name: "Traicie HR BP "
name: "Traicie Recruiter"
role: >
You are an Expert Recruiter working for {tenant_name}
{custom_role}
@@ -16,10 +16,10 @@ backstory: >
AI-driven sourcing. You're more than a recruiter—you're a trusted advisor, a brand ambassador, and a connector of
people and purpose.
{custom_backstory}
full_model_name: "mistral.mistral-medium-latest"
full_model_name: "mistral.magistral-medium-latest"
temperature: 0.3
metadata:
author: "Josako"
date_added: "2025-05-21"
description: "HR BP Agent."
date_added: "2025-06-18"
description: "Traicie Recruiter Agent"
changes: "Initial version"

View File

@@ -12,10 +12,7 @@ class Config(object):
DEBUG = False
DEVELOPMENT = False
SECRET_KEY = environ.get('SECRET_KEY')
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
COMPONENT_NAME = environ.get('COMPONENT_NAME')
SESSION_KEY_PREFIX = f'{COMPONENT_NAME}_'
# Database Settings
DB_HOST = environ.get('DB_HOST')
@@ -44,8 +41,6 @@ class Config(object):
# SECURITY_POST_CHANGE_VIEW = '/admin/login'
# SECURITY_BLUEPRINT_NAME = 'security_bp'
SECURITY_PASSWORD_SALT = environ.get('SECURITY_PASSWORD_SALT')
REMEMBER_COOKIE_SAMESITE = 'strict'
SESSION_COOKIE_SAMESITE = 'Lax'
SECURITY_CONFIRMABLE = True
SECURITY_TRACKABLE = True
SECURITY_PASSWORD_COMPLEXITY_CHECKER = 'zxcvbn'
@@ -56,6 +51,10 @@ class Config(object):
SECURITY_EMAIL_SUBJECT_PASSWORD_NOTICE = 'Your Password Has Been Reset'
SECURITY_EMAIL_PLAINTEXT = False
SECURITY_EMAIL_HTML = True
SECURITY_SESSION_PROTECTION = 'basic' # use 'basic' if 'strong' causes problems
SECURITY_REMEMBER_TOKEN_VALIDITY = timedelta(minutes=60) # Same as the session lifetime
SECURITY_AUTO_LOGIN_AFTER_CONFIRM = True
SECURITY_AUTO_LOGIN_AFTER_RESET = True
# Ensure Flask-Security-Too is handling CSRF tokens when behind a proxy
SECURITY_CSRF_PROTECT_MECHANISMS = ['session']
@@ -189,6 +188,15 @@ class Config(object):
PERMANENT_SESSION_LIFETIME = timedelta(minutes=60)
SESSION_REFRESH_EACH_REQUEST = True
SESSION_COOKIE_NAME = f'{COMPONENT_NAME}_session'
SESSION_COOKIE_DOMAIN = None # Let Flask determine this automatically
SESSION_COOKIE_PATH = '/'
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = False # True for production with HTTPS
SESSION_COOKIE_SAMESITE = 'Lax'
REMEMBER_COOKIE_SAMESITE = 'strict'
SESSION_KEY_PREFIX = f'{COMPONENT_NAME}_'
# JWT settings
JWT_SECRET_KEY = environ.get('JWT_SECRET_KEY')
JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=1) # Set token expiry to 1 hour
@@ -267,6 +275,7 @@ class DevConfig(Config):
# Define the nginx prefix used for the specific apps
EVEAI_APP_LOCATION_PREFIX = '/admin'
EVEAI_CHAT_LOCATION_PREFIX = '/chat'
CHAT_CLIENT_PREFIX = 'chat-client/chat/'
# file upload settings
# UPLOAD_FOLDER = '/app/tenant_files'

View File

@@ -1,15 +1,13 @@
import json
import os
import sys
from datetime import datetime as dt, timezone as tz
from flask import current_app
from graypy import GELFUDPHandler
import logging
import logging.config
# Graylog configuration
GRAYLOG_HOST = os.environ.get('GRAYLOG_HOST', 'localhost')
GRAYLOG_PORT = int(os.environ.get('GRAYLOG_PORT', 12201))
env = os.environ.get('FLASK_ENV', 'development')
@@ -144,23 +142,6 @@ class TuningFormatter(logging.Formatter):
return formatted_msg
class GraylogFormatter(logging.Formatter):
"""Maintains existing Graylog formatting while adding tuning fields"""
def format(self, record):
if getattr(record, 'is_tuning_log', False):
# Add tuning-specific fields to Graylog
record.tuning_fields = {
'is_tuning_log': True,
'tuning_type': record.tuning_type,
'tenant_id': record.tenant_id,
'catalog_id': record.catalog_id,
'specialist_id': record.specialist_id,
'retriever_id': record.retriever_id,
'processor_id': record.processor_id,
'session_id': record.session_id,
}
return super().format(record)
class TuningLogger:
"""Helper class to manage tuning logs with consistent structure"""
@@ -177,10 +158,10 @@ class TuningLogger:
specialist_id: Optional specialist ID for context
retriever_id: Optional retriever ID for context
processor_id: Optional processor ID for context
session_id: Optional session ID for context and log file naming
log_file: Optional custom log file name to use
session_id: Optional session ID for context
log_file: Optional custom log file name (ignored - all logs go to tuning.log)
"""
# Always use the standard tuning logger
self.logger = logging.getLogger(logger_name)
self.tenant_id = tenant_id
self.catalog_id = catalog_id
@@ -188,63 +169,8 @@ class TuningLogger:
self.retriever_id = retriever_id
self.processor_id = processor_id
self.session_id = session_id
self.log_file = log_file
# Determine whether to use a session-specific logger
if session_id:
# Create a unique logger name for this session
session_logger_name = f"{logger_name}_{session_id}"
self.logger = logging.getLogger(session_logger_name)
# If this logger doesn't have handlers yet, configure it
if not self.logger.handlers:
# Determine log file path
if not log_file and session_id:
log_file = f"logs/tuning_{session_id}.log"
elif not log_file:
log_file = "logs/tuning.log"
# Configure the logger
self._configure_session_logger(log_file)
else:
# Use the standard tuning logger
self.logger = logging.getLogger(logger_name)
def _configure_session_logger(self, log_file):
"""Configure a new session-specific logger with appropriate handlers"""
# Create and configure a file handler
file_handler = logging.handlers.RotatingFileHandler(
filename=log_file,
maxBytes=1024 * 1024 * 3, # 3MB
backupCount=3
)
file_handler.setFormatter(TuningFormatter())
file_handler.setLevel(logging.DEBUG)
# Add the file handler to the logger
self.logger.addHandler(file_handler)
# Add Graylog handler in production
env = os.environ.get('FLASK_ENV', 'development')
if env == 'production':
try:
graylog_handler = GELFUDPHandler(
host=GRAYLOG_HOST,
port=GRAYLOG_PORT,
debugging_fields=True
)
graylog_handler.setFormatter(GraylogFormatter())
self.logger.addHandler(graylog_handler)
except Exception as e:
# Fall back to just file logging if Graylog setup fails
fallback_logger = logging.getLogger('eveai_app')
fallback_logger.warning(f"Failed to set up Graylog handler: {str(e)}")
# Set logger level and disable propagation
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
def log_tuning(self, tuning_type: str, message: str, data=None, level=logging.DEBUG):
def log_tuning(self, tuning_type: str, message: str, data=None, level=logging.DEBUG):
"""Log a tuning event with structured data"""
try:
# Create a standard LogRecord for tuning
@@ -275,13 +201,82 @@ def log_tuning(self, tuning_type: str, message: str, data=None, level=logging.DE
self.logger.handle(record)
except Exception as e:
fallback_logger = logging.getLogger('eveai_workers')
fallback_logger.exception(f"Failed to log tuning message: {str(e)}")
print(f"Failed to log tuning message: {str(e)}")
# Set the custom log record factory
logging.setLogRecordFactory(TuningLogRecord)
def configure_logging():
"""Configure logging based on environment
When running in Kubernetes, directs logs to stdout in JSON format
Otherwise uses file-based logging for development/testing
"""
try:
# Get the absolute path to the logs directory
base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
logs_dir = os.path.join(base_dir, 'logs')
# Make sure the logs directory exists with the right permissions
if not os.path.exists(logs_dir):
try:
os.makedirs(logs_dir, exist_ok=True)
print(f"Logs directory aangemaakt op: {logs_dir}")
except (IOError, PermissionError) as e:
print(f"WAARSCHUWING: Kan logs directory niet aanmaken: {e}")
print(f"Logs worden mogelijk niet correct geschreven!")
# Check if running in Kubernetes
in_kubernetes = os.environ.get('KUBERNETES_SERVICE_HOST') is not None
# Check that the pythonjsonlogger package is available when running in Kubernetes
if in_kubernetes:
try:
import pythonjsonlogger.jsonlogger
has_json_logger = True
except ImportError:
print("WAARSCHUWING: python-json-logger pakket is niet geïnstalleerd.")
print("Voer 'pip install python-json-logger>=2.0.7' uit om JSON logging in te schakelen.")
print("Terugvallen op standaard logging formaat.")
has_json_logger = False
in_kubernetes = False # Fall back to standard logging
else:
has_json_logger = False
# Apply the configuration
logging_config = dict(LOGGING)
# Adjust the json_console handler to fall back to the standard formatter when pythonjsonlogger is unavailable
if not has_json_logger and 'json_console' in logging_config['handlers']:
# Replace the json_console handler's formatter with the standard formatter
logging_config['handlers']['json_console']['formatter'] = 'standard'
# In Kubernetes, conditionally modify specific loggers to use JSON console output
# This preserves the same logger names but changes where/how they log
if in_kubernetes:
for logger_name in logging_config['loggers']:
if logger_name: # Skip the root logger
logging_config['loggers'][logger_name]['handlers'] = ['json_console']
# Check that the logs directory is writable before applying the configuration
logs_dir = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs')
if os.path.exists(logs_dir) and not os.access(logs_dir, os.W_OK):
print(f"WAARSCHUWING: Logs directory bestaat maar is niet schrijfbaar: {logs_dir}")
print("Logs worden mogelijk niet correct geschreven!")
logging.config.dictConfig(logging_config)
logging.info(f"Logging configured. Environment: {'Kubernetes' if in_kubernetes else 'Development/Testing'}")
logging.info(f"Logs directory: {logs_dir}")
except Exception as e:
print(f"Error configuring logging: {str(e)}")
print("Gedetailleerde foutinformatie:")
import traceback
traceback.print_exc()
# Fall back to basic configuration
logging.basicConfig(level=logging.INFO)
LOGGING = {
'version': 1,
@@ -290,7 +285,7 @@ LOGGING = {
'file_app': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/eveai_app.log',
'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_app.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2,
'formatter': 'standard',
@@ -298,7 +293,7 @@ LOGGING = {
'file_workers': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/eveai_workers.log',
'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_workers.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2,
'formatter': 'standard',
@@ -306,7 +301,7 @@ LOGGING = {
'file_chat_client': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/eveai_chat_client.log',
'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_chat_client.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2,
'formatter': 'standard',
@@ -314,7 +309,7 @@ LOGGING = {
'file_chat_workers': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/eveai_chat_workers.log',
'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_chat_workers.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2,
'formatter': 'standard',
@@ -322,7 +317,7 @@ LOGGING = {
'file_api': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/eveai_api.log',
'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_api.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2,
'formatter': 'standard',
@@ -330,7 +325,7 @@ LOGGING = {
'file_beat': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/eveai_beat.log',
'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_beat.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2,
'formatter': 'standard',
@@ -338,7 +333,7 @@ LOGGING = {
'file_entitlements': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/eveai_entitlements.log',
'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_entitlements.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2,
'formatter': 'standard',
@@ -346,7 +341,7 @@ LOGGING = {
'file_sqlalchemy': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/sqlalchemy.log',
'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'sqlalchemy.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2,
'formatter': 'standard',
@@ -354,7 +349,7 @@ LOGGING = {
'file_security': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/security.log',
'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'security.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2,
'formatter': 'standard',
@@ -362,7 +357,7 @@ LOGGING = {
'file_rag_tuning': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/rag_tuning.log',
'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'rag_tuning.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2,
'formatter': 'standard',
@@ -370,7 +365,7 @@ LOGGING = {
'file_embed_tuning': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/embed_tuning.log',
'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'embed_tuning.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2,
'formatter': 'standard',
@@ -378,7 +373,7 @@ LOGGING = {
'file_business_events': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/business_events.log',
'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'business_events.log'),
'maxBytes': 1024 * 1024 * 1, # 1MB
'backupCount': 2,
'formatter': 'standard',
@@ -388,100 +383,104 @@ LOGGING = {
'level': 'DEBUG',
'formatter': 'standard',
},
'json_console': {
'class': 'logging.StreamHandler',
'level': 'INFO',
'formatter': 'json',
'stream': 'ext://sys.stdout',
},
'tuning_file': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/tuning.log',
'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'tuning.log'),
'maxBytes': 1024 * 1024 * 3, # 3MB
'backupCount': 3,
'formatter': 'tuning',
},
'graylog': {
'level': 'DEBUG',
'class': 'graypy.GELFUDPHandler',
'host': GRAYLOG_HOST,
'port': GRAYLOG_PORT,
'debugging_fields': True,
'formatter': 'graylog'
},
},
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s (%(component)s) [%(module)s:%(lineno)d]: %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
'graylog': {
'format': '[%(levelname)s] %(name)s (%(component)s) [%(module)s:%(lineno)d in %(funcName)s] '
'[Thread: %(threadName)s]: %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
'()': GraylogFormatter
},
'tuning': {
'()': TuningFormatter,
'datefmt': '%Y-%m-%d %H:%M:%S UTC'
},
'json': {
'format': '%(message)s',
'class': 'logging.Formatter' if 'pythonjsonlogger' not in sys.modules else 'pythonjsonlogger.jsonlogger.JsonFormatter',
'json_default': lambda obj: str(obj) if isinstance(obj, (dt, Exception)) else None,
'json_ensure_ascii': False,
'rename_fields': {
'asctime': 'timestamp',
'levelname': 'severity'
},
'timestamp': True,
'datefmt': '%Y-%m-%dT%H:%M:%S.%fZ'
}
},
'loggers': {
'eveai_app': { # logger for the eveai_app
'handlers': ['file_app', 'graylog', ] if env == 'production' else ['file_app', ],
'handlers': ['file_app'],
'level': 'DEBUG',
'propagate': False
},
'eveai_workers': { # logger for the eveai_workers
'handlers': ['file_workers', 'graylog', ] if env == 'production' else ['file_workers', ],
'handlers': ['file_workers'],
'level': 'DEBUG',
'propagate': False
},
'eveai_chat_client': { # logger for the eveai_chat
'handlers': ['file_chat_client', 'graylog', ] if env == 'production' else ['file_chat_client', ],
'handlers': ['file_chat_client'],
'level': 'DEBUG',
'propagate': False
},
'eveai_chat_workers': { # logger for the eveai_chat_workers
'handlers': ['file_chat_workers', 'graylog', ] if env == 'production' else ['file_chat_workers', ],
'handlers': ['file_chat_workers'],
'level': 'DEBUG',
'propagate': False
},
'eveai_api': { # logger for the eveai_chat_workers
'handlers': ['file_api', 'graylog', ] if env == 'production' else ['file_api', ],
'eveai_api': { # logger for the eveai_api
'handlers': ['file_api'],
'level': 'DEBUG',
'propagate': False
},
'eveai_beat': { # logger for the eveai_beat
'handlers': ['file_beat', 'graylog', ] if env == 'production' else ['file_beat', ],
'handlers': ['file_beat'],
'level': 'DEBUG',
'propagate': False
},
'eveai_entitlements': { # logger for the eveai_entitlements
'handlers': ['file_entitlements', 'graylog', ] if env == 'production' else ['file_entitlements', ],
'handlers': ['file_entitlements'],
'level': 'DEBUG',
'propagate': False
},
'sqlalchemy.engine': { # logger for the sqlalchemy
'handlers': ['file_sqlalchemy', 'graylog', ] if env == 'production' else ['file_sqlalchemy', ],
'handlers': ['file_sqlalchemy'],
'level': 'DEBUG',
'propagate': False
},
'security': { # logger for the security
'handlers': ['file_security', 'graylog', ] if env == 'production' else ['file_security', ],
'handlers': ['file_security'],
'level': 'DEBUG',
'propagate': False
},
'business_events': {
'handlers': ['file_business_events', 'graylog'],
'handlers': ['file_business_events'],
'level': 'DEBUG',
'propagate': False
},
# Single tuning logger
'tuning': {
'handlers': ['tuning_file', 'graylog'] if env == 'production' else ['tuning_file'],
'handlers': ['tuning_file'],
'level': 'DEBUG',
'propagate': False,
},
'': { # root logger
'handlers': ['console'],
'handlers': ['console'] if os.environ.get('KUBERNETES_SERVICE_HOST') is None else ['json_console'],
'level': 'WARNING', # Set higher level for root to minimize noise
'propagate': False
},
}
}
}

View File

@@ -1,4 +1,4 @@
version: "1.1.0"
version: "1.3.0"
name: "Traicie Selection Specialist"
framework: "crewai"
partner: "traicie"
@@ -108,13 +108,13 @@ results:
description: "List of vacancy competencies and their descriptions"
required: false
agents:
- type: "TRAICIE_HR_BP_AGENT"
- type: "TRAICIE_RECRUITER"
version: "1.0"
tasks:
- type: "TRAICIE_GET_COMPETENCIES_TASK"
version: "1.1"
- type: "TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION"
version: "1.0"
metadata:
author: "Josako"
date_added: "2025-05-27"
changes: "Add make to the selection specialist"
date_added: "2025-06-16"
changes: "Realising the actual interaction with the LLM"
description: "Assistant to create a new Vacancy based on Vacancy Text"

View File

@@ -0,0 +1,120 @@
version: "1.3.0"
name: "Traicie Selection Specialist"
framework: "crewai"
partner: "traicie"
chat: false
configuration:
name:
name: "Name"
description: "The name the specialist is called upon."
type: "str"
required: true
role_reference:
name: "Role Reference"
description: "A customer reference to the role"
type: "str"
required: false
make:
name: "Make"
description: "The make for which the role is defined and the selection specialist is created"
type: "system"
system_name: "tenant_make"
required: true
competencies:
name: "Competencies"
description: "An ordered list of competencies."
type: "ordered_list"
list_type: "competency_details"
required: true
tone_of_voice:
name: "Tone of Voice"
description: "The tone of voice the specialist uses to communicate"
type: "enum"
allowed_values: ["Professional & Neutral", "Warm & Empathetic", "Energetic & Enthusiastic", "Accessible & Informal", "Expert & Trustworthy", "No-nonsense & Goal-driven"]
default: "Professional & Neutral"
required: true
language_level:
name: "Language Level"
description: "Language level to be used when communicating, relating to CEFR levels"
type: "enum"
allowed_values: ["Basic", "Standard", "Professional"]
default: "Standard"
required: true
welcome_message:
name: "Welcome Message"
description: "Introductory text given by the specialist - but translated according to Tone of Voice, Language Level and Starting Language"
type: "text"
required: false
closing_message:
name: "Closing Message"
description: "Closing message given by the specialist - but translated according to Tone of Voice, Language Level and Starting Language"
type: "text"
required: false
competency_details:
title:
name: "Title"
description: "Competency Title"
type: "str"
required: true
description:
name: "Description"
description: "Description (in context of the role) of the competency"
type: "text"
required: true
is_knockout:
name: "KO"
description: "Defines if the competency is a knock-out criterium"
type: "boolean"
required: true
default: false
assess:
name: "Assess"
description: "Indication if this competency is to be assessed"
type: "boolean"
required: true
default: true
arguments:
region:
name: "Region"
type: "str"
description: "The region of the specific vacancy"
required: false
working_schedule:
name: "Work Schedule"
type: "str"
description: "The work schedule or employment type of the specific vacancy"
required: false
start_date:
name: "Start Date"
type: "date"
description: "The start date of the specific vacancy"
required: false
language:
name: "Language"
type: "str"
description: "The language (2-letter code) used to start the conversation"
required: true
interaction_mode:
name: "Interaction Mode"
type: "enum"
description: "The interaction mode the specialist will start working in."
allowed_values: ["Job Application", "Seduction"]
default: "Job Application"
required: true
results:
competencies:
name: "competencies"
type: "List[str, str]"
description: "List of vacancy competencies and their descriptions"
required: false
agents:
- type: "TRAICIE_RECRUITER_AGENT"
version: "1.0"
tasks:
- type: "TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK"
version: "1.0"
metadata:
author: "Josako"
date_added: "2025-06-18"
changes: "Add make to the selection specialist"
description: "Assistant to create a new Vacancy based on Vacancy Text"

View File

@@ -5,17 +5,24 @@ task_description: >
(both description and title). The criteria are in between triple backquotes. You need to prepare for the interviews,
and are to provide for each of these ko criteria:
- A question to ask the recruitment candidate describing the context of the ko criterium. Use your experience to not
just ask a closed question, but a question from which you can indirectly derive a positive or negative qualification
of the criterium based on the answer of the candidate.
- A set of max 5 answers on that question, from the candidates perspective. One of the answers will result in a
positive evaluation of the criterium, the other ones in a negative evaluation. Mark each of the answers as positive
- A short question to ask the recruitment candidate describing the context of the ko criterium. Use your experience to
ask a question that enables us to verify compliance with the criterium.
- A set of 2 short answers to that question, from the candidate's perspective. One of the answers will result in a
positive evaluation of the criterium, the other one in a negative evaluation. Mark each of the answers as positive
or negative.
Describe the answers from the perspective of the candidate. Be sure to include all necessary aspects in your answers.
Apply the following tone of voice in both questions and answers: {tone_of_voice}
Use the following description to understand tone of voice:
{tone_of_voice_context}
Apply the following language level in both questions and answers: {language_level}
Use {language} as language for both questions and answers.
Use the following description to understand language_level:
{language_level_context}
```{ko_criteria}```
@@ -25,7 +32,8 @@ expected_output: >
For each of the ko criteria, you provide:
- the exact title as specified in the original language
- the question in {language}
- a set of answers, with for each answer an indication if it is the correct answer, or a false response. In {language}.
- a positive answer, resulting in a positive evaluation of the criterium. In {language}.
- a negative answer, resulting in a negative evaluation of the criterium. In {language}.
{custom_expected_output}
metadata:
author: "Josako"

View File

@@ -0,0 +1,37 @@
version: "1.0.0"
name: "KO Criteria Interview Definition"
task_description: >
In context of a vacancy in your company {tenant_name}, you are provided with a set of competencies
(both description and title). The competencies are in between triple backquotes. The competencies provided should be
handled as knock-out criteria.
For each of the knock-out criteria, you need to define
- A short (1 sentence), closed-ended question (Yes / No) to ask the recruitment candidate. Use your experience to ask a question that
enables us to verify compliance with the criterium.
- A set of 2 short answers (1 short sentence each) to that question (positive answer / negative answer), from the
candidate's perspective.
The positive answer will result in a positive evaluation of the criterium, the negative answer in a negative evaluation
of the criterium. Try to avoid just using Yes / No as positive and negative answers.
Apply the following tone of voice in both questions and answers: {tone_of_voice}, i.e. {tone_of_voice_context}
Apply the following language level in both questions and answers: {language_level}, i.e. {language_level_context}
Use {language} as language for both questions and answers.
```{ko_criteria}```
{custom_description}
expected_output: >
For each of the ko criteria, you provide:
- the exact title as specified in the original language
- the question in {language}
- a positive answer, resulting in a positive evaluation of the criterium. In {language}.
- a negative answer, resulting in a negative evaluation of the criterium. In {language}.
{custom_expected_output}
metadata:
author: "Josako"
date_added: "2025-06-20"
description: "A Task to define interview Q&A from given KO Criteria"
changes: "Improvement to ensure closed-ended questions and short descriptions"

View File

@@ -32,5 +32,10 @@ AGENT_TYPES = {
"name": "Traicie HR BP Agent",
"description": "An HR Business Partner Agent",
"partner": "traicie"
}
},
"TRAICIE_RECRUITER_AGENT": {
"name": "Traicie Recruiter Agent",
"description": "An Senior Recruiter Agent",
"partner": "traicie"
},
}

View File

@@ -41,5 +41,10 @@ TASK_TYPES = {
"name": "Traicie Get KO Criteria",
"description": "A Task to get KO Criteria from a Vacancy Text",
"partner": "traicie"
},
"TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK": {
"name": "Traicie KO Criteria Interview Definition",
"description": "A Task to define KO Criteria questions to be used during the interview",
"partner": "traicie"
}
}

View File

@@ -5,10 +5,20 @@ All notable changes to EveAI will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [2.3.7-alfa]
### Added
- Basic Base Specialist additions for handling phases and transferring data between state and output
- Introduction of URL and QR-code for MagicLink
### Changed
- Logging improvement & simplification (remove Graylog)
- Traicie Selection Specialist v1.3 - full roundtrip & full process
## [2.3.6-alfa]
### Added
- Full Chat Client functionaltiy, including Forms, ESS, theming
- Full Chat Client functionality, including Forms, ESS, theming
- First Demo version of Traicie Selection Specialist
## [2.3.5-alfa]

View File

@@ -12,7 +12,7 @@ import logging.config
from common.models.user import TenantDomain
from common.utils.cors_utils import get_allowed_origins
from common.utils.database import Database
from config.logging_config import LOGGING
from config.logging_config import configure_logging
from .api.document_api import document_ns
from .api.auth import auth_ns
from .api.specialist_execution_api import specialist_execution_ns
@@ -40,7 +40,7 @@ def create_app(config_file=None):
app.celery = make_celery(app.name, app.config)
init_celery(app.celery, app)
logging.config.dictConfig(LOGGING)
configure_logging()
logger = logging.getLogger(__name__)
logger.info("eveai_api starting up")

View File

@@ -13,7 +13,7 @@ import common.models.interaction
import common.models.entitlements
import common.models.document
from common.utils.startup_eveai import perform_startup_actions
from config.logging_config import LOGGING
from config.logging_config import configure_logging
from common.utils.security import set_tenant_session_data
from common.utils.errors import register_error_handlers
from common.utils.celery_utils import make_celery, init_celery
@@ -47,8 +47,16 @@ def create_app(config_file=None):
except OSError:
pass
logging.config.dictConfig(LOGGING)
logger = logging.getLogger(__name__)
# Configure logging based on the environment (K8s or traditional)
try:
configure_logging()
logger = logging.getLogger(__name__)
# Verify that logging works
logger.debug("Logging test in eveai_app")
except Exception as e:
print(f"Critical Error Initialising Error: {str(e)}")
import traceback
traceback.print_exc()
logger.info("eveai_app starting up")
@@ -92,6 +100,45 @@ def create_app(config_file=None):
# app.logger.debug(f"Before request - Session data: {session}")
# app.logger.debug(f"Before request - Request headers: {request.headers}")
@app.before_request
def before_request():
from flask import session, request
from flask_login import current_user
import datetime
app.logger.debug(f"Before request - URL: {request.url}")
app.logger.debug(f"Before request - Session permanent: {session.permanent}")
# Log the session expiry time if present
if current_user.is_authenticated:
# Make sure the session is permanent (required for PERMANENT_SESSION_LIFETIME)
if not session.permanent:
session.permanent = True
app.logger.debug("Session marked as permanent (enables 60min timeout)")
# Log when the session would expire
if '_permanent' in session:
expires_at = datetime.datetime.now() + app.permanent_session_lifetime
app.logger.debug(f"Session will expire at: {expires_at} (60 min from now)")
@app.route('/debug/session')
def debug_session():
from flask import session
from flask_security import current_user
import datetime
if current_user.is_authenticated:
info = {
'session_permanent': session.permanent,
'session_lifetime_minutes': app.permanent_session_lifetime.total_seconds() / 60,
'session_refresh_enabled': app.config.get('SESSION_REFRESH_EACH_REQUEST'),
'current_time': datetime.datetime.now().isoformat(),
'session_data_keys': list(session.keys())
}
return jsonify(info)
else:
return jsonify({'error': 'Not authenticated'})
# Register template filters
register_filters(app)

View File

@@ -9,11 +9,30 @@
{% block content %}
<form method="post">
{{ form.hidden_tag() }}
{% set disabled_fields = ['magic_link_code'] %}
{% set disabled_fields = ['magic_link_code', 'chat_client_url', 'qr_code_url'] %}
{% set exclude_fields = [] %}
<!-- Render Static Fields -->
{% for field in form.get_static_fields() %}
{{ render_field(field, disabled_fields, exclude_fields) }}
{% if field.name == 'qr_code_url' and field.data %}
<div class="form-group">
<label for="{{ field.id }}">{{ field.label.text }}</label>
<div style="max-width: 200px;">
<img src="{{ field.data }}" alt="QR Code" class="img-fluid">
</div>
<input type="hidden" name="{{ field.name }}" value="{{ field.data|e }}">
</div>
{% elif field.name == 'chat_client_url' %}
<div class="form-group">
<label for="{{ field.id }}" class="form-label">{{ field.label.text }}</label>
<div class="input-group">
<input type="text" class="form-control" value="{{ field.data }}" id="{{ field.id }}" readonly>
<a href="{{ field.data }}" class="btn btn-primary" target="_blank">Open link</a>
</div>
<input type="hidden" name="{{ field.name }}" value="{{ field.data|e }}">
</div>
{% else %}
{{ render_field(field, disabled_fields, exclude_fields) }}
{% endif %}
{% endfor %}
<!-- Render Dynamic Fields -->
{% for collection_name, fields in form.get_dynamic_fields().items() %}

View File

@@ -259,6 +259,10 @@ def view_usages():
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 10, type=int)
if not session.get('tenant', None):
flash('You can only view usage for a Tenant. Select a Tenant to continue!', 'danger')
return redirect(prefixed_url_for('user_bp.select_tenant'))
tenant_id = session.get('tenant').get('id')
query = LicenseUsage.query.filter_by(tenant_id=tenant_id).order_by(desc(LicenseUsage.id))

View File

@@ -162,6 +162,8 @@ class EditSpecialistMagicLinkForm(DynamicFormBase):
render_kw={'readonly': True})
specialist_id = IntegerField('Specialist', validators=[DataRequired()], render_kw={'readonly': True})
specialist_name = StringField('Specialist Name', validators=[DataRequired()], render_kw={'readonly': True})
chat_client_url = StringField('Chat Client URL', validators=[Optional()], render_kw={'readonly': True})
qr_code_url = StringField('QR Code', validators=[Optional()], render_kw={'readonly': True})
tenant_make_id = SelectField('Tenant Make', validators=[Optional()], coerce=int)
valid_from = DateField('Valid From', id='form-control datepicker', validators=[Optional()])
valid_to = DateField('Valid To', id='form-control datepicker', validators=[Optional()])

View File

@@ -748,6 +748,56 @@ def edit_specialist_magic_link(specialist_magic_link_id):
else:
form.tenant_make_id.data = specialist_ml.tenant_make_id
# Set the chat client URL
tenant_id = session.get('tenant').get('id')
chat_client_prefix = current_app.config.get('CHAT_CLIENT_PREFIX', 'chat_client/chat/')
base_url = request.url_root
magic_link_code = specialist_ml.magic_link_code
# Parse the URL to preserve port information when it differs from the default
url_parts = request.url.split('/')
host_port = url_parts[2] # Contains both hostname and port, if present
# Generate the full URL for chat client with magic link code
chat_client_url = f"{request.scheme}://{host_port}/{chat_client_prefix}{magic_link_code}"
form.chat_client_url.data = chat_client_url
# Generate QR code as data URI for direct embedding in HTML
try:
import qrcode
import io
import base64
# Generate QR code as PNG for better compatibility
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=10,
border=4
)
qr.add_data(chat_client_url)
qr.make(fit=True)
# Generate PNG image in memory
img = qr.make_image(fill_color="black", back_color="white")
buffer = io.BytesIO()
img.save(buffer, format='PNG')
img_data = buffer.getvalue()
# Create data URI for direct embedding in HTML
img_base64 = base64.b64encode(img_data).decode('utf-8')
data_uri = f"data:image/png;base64,{img_base64}"
# Store the data URI in the form data
form.qr_code_url.data = data_uri
current_app.logger.debug(f"QR code generated successfully for {magic_link_code}")
current_app.logger.debug(f"QR code data URI starts with: {data_uri[:50]}...")
except Exception as e:
current_app.logger.error(f"Failed to generate QR code: {str(e)}")
form.qr_code_url.data = "Error generating QR code"
if form.validate_on_submit():
# Update the basic fields
form.populate_obj(specialist_ml)

View File

@@ -4,7 +4,7 @@ from flask import Flask
import os
from common.utils.celery_utils import make_celery, init_celery
from config.logging_config import LOGGING
from config.logging_config import configure_logging
from config.config import get_config
@@ -21,7 +21,7 @@ def create_app(config_file=None):
case _:
app.config.from_object(get_config('dev'))
logging.config.dictConfig(LOGGING)
configure_logging()
register_extensions(app)

View File

@@ -9,7 +9,7 @@ from common.extensions import (db, bootstrap, cors, csrf, session,
minio_client, simple_encryption, metrics, cache_manager, content_manager)
from common.models.user import Tenant, SpecialistMagicLinkTenant
from common.utils.startup_eveai import perform_startup_actions
from config.logging_config import LOGGING
from config.logging_config import configure_logging
from eveai_chat_client.utils.errors import register_error_handlers
from common.utils.celery_utils import make_celery, init_celery
from common.utils.template_filters import register_filters
@@ -39,7 +39,7 @@ def create_app(config_file=None):
except OSError:
pass
logging.config.dictConfig(LOGGING)
configure_logging()
logger = logging.getLogger(__name__)
logger.info("eveai_chat_client starting up")

View File

@@ -5,7 +5,7 @@ import os
from common.utils.celery_utils import make_celery, init_celery
from common.extensions import db, cache_manager
from config.logging_config import LOGGING
from config.logging_config import configure_logging
from config.config import get_config
@@ -22,7 +22,7 @@ def create_app(config_file=None):
case _:
app.config.from_object(get_config('dev'))
logging.config.dictConfig(LOGGING)
configure_logging()
app.logger.info('Starting up eveai_chat_workers...')
register_extensions(app)

View File

@@ -0,0 +1,20 @@
LANGUAGE_LEVEL = [
{
"name": "Basic",
"description": "Short, simple sentences. Minimal jargon. Lots of visual and concrete language.",
"cefr_level": "A2 - B1",
"ideal_audience": "Manual laborers, entry-level roles, newcomers with another native language"
},
{
"name": "Standard",
"description": "Clear spoken language. Well-formulated without difficult words.",
"cefr_level": "B2",
"ideal_audience": "Retail, administration, logistics, early-career professionals"
},
{
"name": "Professional",
"description": "Business language with technical terms where needed. More complex sentence structures.",
"cefr_level": "C1",
"ideal_audience": "Management, HR, technical profiles"
}
]

View File

@@ -0,0 +1,32 @@
TONE_OF_VOICE = [
{
"name": "Professional & Neutral",
"description": "Business-like, clear, to the point. Focused on facts.",
"when_to_use": "Corporate jobs, legal roles, formal sectors"
},
{
"name": "Warm & Empathetic",
"description": "Human, compassionate, reassuring.",
"when_to_use": "Healthcare, education, HR, social professions"
},
{
"name": "Energetic & Enthusiastic",
"description": "Upbeat, persuasive, motivating.",
"when_to_use": "Sales, marketing, hospitality, start-ups"
},
{
"name": "Accessible & Informal",
"description": "Casual, approachable, friendly, and human.",
"when_to_use": "Youth-focused, entry-level, retail, creative sectors"
},
{
"name": "Expert & Trustworthy",
"description": "Calm authority, advisory tone, knowledgeable.",
"when_to_use": "IT, engineering, consultancy, medical profiles"
},
{
"name": "No-nonsense & Goal-driven",
"description": "Direct, efficient, pragmatic.",
"when_to_use": "Technical, logistics, blue-collar jobs, production environments"
}
]

View File

@@ -0,0 +1,15 @@
from typing import List, Optional
from pydantic import BaseModel, Field
from eveai_chat_workers.outputs.globals.basic_types.list_item import ListItem
class KOQuestion(BaseModel):
title: str = Field(..., description="The title of the knockout criterium.")
question: str = Field(..., description="The corresponding question asked to the candidate.")
answer_positive: Optional[str] = Field(None, description="The answer to the question, resulting in a positive outcome.")
answer_negative: Optional[str] = Field(None, description="The answer to the question, resulting in a negative outcome.")
class KOQuestions(BaseModel):
ko_questions: List[KOQuestion] = Field(
default_factory=list,
description="KO Questions and answers."
)
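A minimal sketch of validating a raw LLM result against this model (the payload is invented; `model_validate` is pydantic v2, which this codebase already uses elsewhere):
```python
raw = {
    "ko_questions": [
        {
            "title": "Driving licence B",
            "question": "Do you hold a category B driving licence?",
            "answer_positive": "Yes, I have held one for several years.",
            "answer_negative": "No, I do not have a driving licence.",
        }
    ]
}

questions = KOQuestions.model_validate(raw)  # raises ValidationError on malformed output
assert questions.ko_questions[0].title == "Driving licence B"
```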

View File

@@ -4,7 +4,8 @@ from typing import Dict, Any, List
from flask import current_app
from common.extensions import cache_manager
from common.models.interaction import SpecialistRetriever
from common.models.interaction import SpecialistRetriever, Specialist
from common.models.user import Tenant
from common.utils.execution_progress import ExecutionProgressTracker
from config.logging_config import TuningLogger
from eveai_chat_workers.retrievers.base import BaseRetriever
@@ -17,7 +18,9 @@ class BaseSpecialistExecutor(ABC):
def __init__(self, tenant_id: int, specialist_id: int, session_id: str, task_id: str):
self.tenant_id = tenant_id
self.tenant = Tenant.query.get_or_404(tenant_id)
self.specialist_id = specialist_id
self.specialist = Specialist.query.get_or_404(specialist_id)
self.session_id = session_id
self.task_id = task_id
self.tuning = False
@@ -96,6 +99,37 @@ class BaseSpecialistExecutor(ABC):
def update_progress(self, processing_type, data) -> None:
self.ept.send_update(self.task_id, processing_type, data)
def _replace_system_variables(self, text: str) -> str:
"""
Replace all system variables in the text with their corresponding values.
System variables are in the format 'tenant_<attribute_name>'
Args:
text: The text containing system variables to replace
Returns:
str: The text with all system variables replaced
"""
if not text:
return text
from common.utils.model_utils import replace_variable_in_template
# Find all tenant_* variables and replace them with tenant attribute values
# Format of variables: tenant_name, tenant_code, etc.
result = text
# Get all attributes of the tenant object
tenant_attrs = vars(self.tenant)
# Replace all tenant_* variables
for attr_name, attr_value in tenant_attrs.items():
variable = f"tenant_{attr_name}"
if variable in result:
result = replace_variable_in_template(result, variable, str(attr_value))
return result
@abstractmethod
def execute_specialist(self, arguments: SpecialistArguments) -> SpecialistResult:
"""Execute the specialist's logic"""

View File

@@ -33,10 +33,6 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
def __init__(self, tenant_id: int, specialist_id: int, session_id: str, task_id):
super().__init__(tenant_id, specialist_id, session_id, task_id)
# Check and load the specialist
self.specialist = Specialist.query.get_or_404(specialist_id)
# Set the specific configuration for the SPIN Specialist
# self.specialist_configuration = json.loads(self.specialist.configuration)
self.tuning = self.specialist.tuning
# Initialize retrievers
self.retrievers = self._initialize_retrievers()
@@ -54,15 +50,20 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
self._task_pydantic_outputs: Dict[str, Type[BaseModel]] = {}
self._task_state_names: Dict[str, str] = {}
# Processed configurations
# State-Result relations (for adding / restoring information to / from history
self._state_result_relations: Dict[str, str] = {}
# Process configurations
self._config = cache_manager.crewai_processed_config_cache.get_specialist_config(tenant_id, specialist_id)
self._config_task_agents()
self._config_pydantic_outputs()
self._instantiate_crew_assets()
self._instantiate_specialist()
self._config_state_result_relations()
# Retrieve history
self._cached_session = cache_manager.chat_session_cache.get_cached_session(self.session_id)
self._restore_state_from_history()
# Format history for the prompt
self._formatted_history = self._generate_formatted_history()
@@ -110,6 +111,19 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
"""Configure the task pydantic outputs by adding task-output combinations. Use _add_pydantic_output()"""
raise NotImplementedError
def _add_state_result_relation(self, state_name: str, result_name: str = None):
"""Add a state-result relation to the specialist. This is used to add information to the history
If result_name is None, the state name is used as the result name. (default behavior)
"""
if not result_name:
result_name = state_name
self._state_result_relations[state_name] = result_name
@abstractmethod
def _config_state_result_relations(self):
"""Configure the state-result relations by adding state-result combinations. Use _add_state_result_relation()"""
raise NotImplementedError
@property
def task_pydantic_outputs(self):
return self._task_pydantic_outputs
@@ -127,7 +141,9 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
for agent in self.specialist.agents:
agent_config = cache_manager.agents_config_cache.get_config(agent.type, agent.type_version)
agent_role = agent_config.get('role', '').replace('{custom_role}', agent.role or '')
agent_role = self._replace_system_variables(agent_role)
agent_goal = agent_config.get('goal', '').replace('{custom_goal}', agent.goal or '')
agent_goal = self._replace_system_variables(agent_goal)
agent_backstory = agent_config.get('backstory', '').replace('{custom_backstory}', agent.backstory or '')
agent_full_model_name = agent_config.get('full_model_name', 'mistral.mistral-large-latest')
agent_temperature = agent_config.get('temperature', 0.3)
@@ -152,6 +168,7 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
task_config = cache_manager.tasks_config_cache.get_config(task.type, task.type_version)
task_description = (task_config.get('task_description', '')
.replace('{custom_description}', task.task_description or ''))
task_description = self._replace_system_variables(task_description)
task_expected_output = (task_config.get('expected_output', '')
.replace('{custom_expected_output}', task.expected_output or ''))
# dynamically build the arguments
@@ -161,9 +178,12 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
"verbose": task.tuning
}
task_name = task.type.lower()
current_app.logger.debug(f"Task {task_name} is getting processed")
if task_name in self._task_pydantic_outputs:
task_kwargs["output_pydantic"] = self._task_pydantic_outputs[task_name]
current_app.logger.debug(f"Task {task_name} has an output pydantic: {self._task_pydantic_outputs[task_name]}")
if task_name in self._task_agents:
current_app.logger.debug(f"Task {task_name} has an agent: {self._task_agents[task_name]}")
task_kwargs["agent"] = self._agents[self._task_agents[task_name]]
# Instantiate the task with dynamic arguments
@@ -328,6 +348,27 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
return formatted_context, citations
def _update_specialist_results(self, specialist_results: SpecialistResult) -> SpecialistResult:
"""Update the specialist results with the latest state information"""
update_data = {}
state_dict = self.flow.state.model_dump()
for state_name, result_name in self._state_result_relations.items():
if state_name in state_dict and state_dict[state_name] is not None:
update_data[result_name] = state_dict[state_name]
return specialist_results.model_copy(update=update_data)
def _restore_state_from_history(self):
"""Restore the state from the history"""
if not self._cached_session.interactions:
return
last_interaction = self._cached_session.interactions[-1]
if not last_interaction.specialist_results:
return
for state_name, result_name in self._state_result_relations.items():
if result_name in last_interaction.specialist_results:
setattr(self.flow.state, state_name, last_interaction.specialist_results[result_name])
@abstractmethod
def execute(self, arguments: SpecialistArguments, formatted_context: str, citations: List[int]) -> SpecialistResult:
raise NotImplementedError
@@ -354,8 +395,10 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
"detailed_query": detailed_query,
"citations": citations,
}
final_result = result.model_copy(update=modified_result)
intermediate_result = result.model_copy(update=modified_result)
else:
final_result = self.execute(arguments, "", [])
intermediate_result = self.execute(arguments, "", [])
final_result = self._update_specialist_results(intermediate_result)
return final_result

View File

@@ -18,6 +18,9 @@ from eveai_chat_workers.outputs.traicie.competencies.competencies_v1_1 import Co
from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew, EveAICrewAIFlow, EveAIFlowState
from common.services.interaction.specialist_services import SpecialistServices
NEW_SPECIALIST_TYPE = "TRAICIE_SELECTION_SPECIALIST"
NEW_SPECIALIST_TYPE_VERSION = "1.3"
class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
"""
@@ -117,8 +120,8 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
new_specialist = Specialist(
name=name,
description=f"Specialist for {arguments.role_name} role",
type="TRAICIE_SELECTION_SPECIALIST",
type_version="1.1",
type=NEW_SPECIALIST_TYPE,
type_version=NEW_SPECIALIST_TYPE_VERSION,
tuning=False,
configuration=selection_config,
)
@@ -130,7 +133,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
current_app.logger.error(f"Error creating selection specialist: {str(e)}")
raise e
SpecialistServices.initialize_specialist(new_specialist.id, "TRAICIE_SELECTION_SPECIALIST", "1.0")
SpecialistServices.initialize_specialist(new_specialist.id, NEW_SPECIALIST_TYPE, NEW_SPECIALIST_TYPE_VERSION)

View File

@@ -0,0 +1,350 @@
import asyncio
import json
from typing import Optional, List, Dict, Any
from datetime import date
from time import sleep
from crewai.flow.flow import start, listen, and_
from flask import current_app
from pydantic import BaseModel, Field, EmailStr
from sqlalchemy.exc import SQLAlchemyError
from common.extensions import db
from common.models.user import Tenant
from common.models.interaction import Specialist
from eveai_chat_workers.outputs.globals.basic_types.list_item import ListItem
from eveai_chat_workers.outputs.traicie.knockout_questions.knockout_questions_v1_0 import KOQuestions, KOQuestion
from eveai_chat_workers.specialists.crewai_base_specialist import CrewAIBaseSpecialistExecutor
from eveai_chat_workers.specialists.specialist_typing import SpecialistResult, SpecialistArguments
from eveai_chat_workers.outputs.traicie.competencies.competencies_v1_1 import Competencies
from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew, EveAICrewAIFlow, EveAIFlowState
from common.services.interaction.specialist_services import SpecialistServices
from common.extensions import cache_manager
from eveai_chat_workers.definitions.language_level.language_level_v1_0 import LANGUAGE_LEVEL
from eveai_chat_workers.definitions.tone_of_voice.tone_of_voice_v1_0 import TONE_OF_VOICE
from common.utils.eveai_exceptions import EveAISpecialistExecutionError
class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
"""
type: TRAICIE_SELECTION_SPECIALIST
type_version: 1.3
Traicie Selection Specialist Executor class
"""
def __init__(self, tenant_id, specialist_id, session_id, task_id, **kwargs):
self.role_definition_crew = None
super().__init__(tenant_id, specialist_id, session_id, task_id)
# Load the Tenant & set language
self.tenant = Tenant.query.get_or_404(tenant_id)
@property
def type(self) -> str:
return "TRAICIE_SELECTION_SPECIALIST"
@property
def type_version(self) -> str:
return "1.3"
def _config_task_agents(self):
self._add_task_agent("traicie_ko_criteria_interview_definition_task", "traicie_recruiter_agent")
def _config_pydantic_outputs(self):
self._add_pydantic_output("traicie_ko_criteria_interview_definition_task", KOQuestions, "ko_questions")
def _config_state_result_relations(self):
self._add_state_result_relation("ko_criteria_questions")
self._add_state_result_relation("ko_criteria_scores")
self._add_state_result_relation("competency_questions")
self._add_state_result_relation("competency_scores")
self._add_state_result_relation("personal_contact_data")
def _instantiate_specialist(self):
verbose = self.tuning
ko_def_agents = [self.traicie_recruiter_agent]
ko_def_tasks = [self.traicie_ko_criteria_interview_definition_task]
self.ko_def_crew = EveAICrewAICrew(
self,
"KO Criteria Interview Definition Crew",
agents=ko_def_agents,
tasks=ko_def_tasks,
verbose=verbose,
)
self.flow = SelectionFlow(
self,
self.ko_def_crew
)
def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist execution started", {})
current_app.logger.debug(f"Arguments: {arguments.model_dump()}")
current_app.logger.debug(f"Formatted Context: {formatted_context}")
current_app.logger.debug(f"Formatted History: {self._formatted_history}")
current_app.logger.debug(f"Cached Chat Session: {self._cached_session}")
if not self._cached_session.interactions:
specialist_phase = "initial"
else:
specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')
results = None
match specialist_phase:
case "initial":
results = self.execute_initial_state(arguments, formatted_context, citations)
case "ko_question_evaluation":
results = self.execute_ko_question_evaluation(arguments, formatted_context, citations)
case "personal_contact_data":
results = self.execute_personal_contact_data(arguments, formatted_context, citations)
case "no_valid_candidate":
results = self.execute_no_valid_candidate(arguments, formatted_context, citations)
case "candidate_selected":
results = self.execute_candidate_selected(arguments, formatted_context, citations)
self.log_tuning(f"Traicie Selection Specialist execution ended", {"Results": results.model_dump() if results else "No info"})
return results
def execute_initial_state(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist initial_state_execution started", {})
current_app.logger.debug(f"Specialist Competencies:\n{self.specialist.configuration.get("competencies", [])}")
ko_competencies = []
for competency in self.specialist.configuration.get("competencies", []):
if competency["is_knockout"] is True and competency["assess"] is True:
current_app.logger.debug(f"Assessable Knockout competency: {competency}")
ko_competencies.append({"title: ": competency["title"], "description": competency["description"]})
tone_of_voice = self.specialist.configuration.get('tone_of_voice', 'Professional & Neutral')
selected_tone_of_voice = next(
(item for item in TONE_OF_VOICE if item["name"] == tone_of_voice),
None # fallback if not found
)
current_app.logger.debug(f"Selected tone of voice: {selected_tone_of_voice}")
tone_of_voice_context = selected_tone_of_voice["description"] if selected_tone_of_voice else ""
language_level = self.specialist.configuration.get('language_level', 'Standard')
        selected_language_level = next(
            (item for item in LANGUAGE_LEVEL if item["name"] == language_level),
            LANGUAGE_LEVEL[0]  # fall back to the first defined level if the configured one is unknown
        )
        current_app.logger.debug(f"Selected language level: {selected_language_level}")
        language_level_context = (f"{selected_language_level['description']}, "
                                  f"corresponding to CEFR level {selected_language_level['cefr_level']}")
flow_inputs = {
"region": arguments.region,
"working_schedule": arguments.working_schedule,
"start_date": arguments.start_date,
"language": arguments.language,
"interaction_mode": arguments.interaction_mode,
'tone_of_voice': tone_of_voice,
'tone_of_voice_context': tone_of_voice_context,
'language_level': language_level,
'language_level_context': language_level_context,
'ko_criteria': ko_competencies,
}
flow_results = self.flow.kickoff(inputs=flow_inputs)
current_app.logger.debug(f"Flow results: {flow_results}")
current_app.logger.debug(f"Flow state: {self.flow.state}")
fields = {}
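        # Turn each generated KO question into a two-option form field (positive vs. negative answer).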
for ko_question in self.flow.state.ko_criteria_questions:
fields[ko_question.title] = {
"name": ko_question.title,
"description": ko_question.title,
"context": ko_question.question,
"type": "options",
"required": True,
"allowed_values": [ko_question.answer_positive, ko_question.answer_negative]
}
ko_form = {
"type": "KO_CRITERIA_FORM",
"version": "1.0.0",
"name": "Starter Questions",
"icon": "verified",
"fields": fields,
}
results = SpecialistResult.create_for_type(self.type, self.type_version,
answer=f"We starten met een aantal KO Criteria vragen",
form_request=ko_form,
phase="ko_question_evaluation")
return results
def execute_ko_question_evaluation(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist ko_question_evaluation started", {})
# Check if the form has been returned (it should)
if not arguments.form_values:
raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id, "No form values returned")
current_app.logger.debug(f"Form values: {arguments.form_values}")
# Load the previous KO Questions
previous_ko_questions = self.flow.state.ko_criteria_questions
current_app.logger.debug(f"Previous KO Questions: {previous_ko_questions}")
# Evaluate KO Criteria
evaluation = "positive"
        for criterium, answer in arguments.form_values.items():
            for qa in previous_ko_questions:
                # The state holds KOQuestion models, so use attribute access rather than dict-style .get()
                if qa.title == criterium:
                    if qa.answer_positive != answer:
                        evaluation = "negative"
                        break
            if evaluation == "negative":
                break
if evaluation == "negative":
results = SpecialistResult.create_for_type(self.type, self.type_version,
answer=f"We hebben de antwoorden op de KO criteria verwerkt. Je voldoet jammer genoeg niet aan de minimale vereisten voor deze job.",
form_request=None,
phase="no_valid_candidate")
else:
# Check if answers to questions are positive
contact_form = cache_manager.specialist_forms_config_cache.get_config("PERSONAL_CONTACT_FORM", "1.0")
results = SpecialistResult.create_for_type(self.type, self.type_version,
answer=f"We hebben de antwoorden op de KO criteria verwerkt. Je bent een geschikte kandidaat. Kan je je contactegevens doorgeven?",
form_request=contact_form,
phase="personal_contact_data")
return results
def execute_personal_contact_data(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist personal_contact_data started", {})
results = SpecialistResult.create_for_type(self.type, self.type_version,
answer=f"We hebben de contactgegevens verwerkt. We nemen zo snel mogelijk contact met je op.",
phase="candidate_selected")
return results
def execute_no_valid_candidate(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist no_valid_candidate started", {})
        results = SpecialistResult.create_for_type(self.type, self.type_version,
                                                   answer="Unfortunately, you do not meet the minimum requirements for this job. But feel free to apply for one of our other jobs.",
                                                   phase="no_valid_candidate")
        return results
def execute_candidate_selected(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist candidate_selected started", {})
results = SpecialistResult.create_for_type(self.type, self.type_version,
answer=f"We hebben je contactgegegevens verwerkt. We nemen zo snel mogelijk contact met je op.",
phase="candidate_selected")
return results
class SelectionInput(BaseModel):
region: str = Field(..., alias="region")
working_schedule: Optional[str] = Field(..., alias="working_schedule")
    start_date: Optional[date] = Field(None, alias="start_date")
language: Optional[str] = Field(None, alias="language")
interaction_mode: Optional[str] = Field(None, alias="interaction_mode")
tone_of_voice: Optional[str] = Field(None, alias="tone_of_voice")
tone_of_voice_context: Optional[str] = Field(None, alias="tone_of_voice_context")
language_level: Optional[str] = Field(None, alias="language_level")
language_level_context: Optional[str] = Field(None, alias="language_level_context")
ko_criteria: Optional[List[Dict[str, str]]] = Field(None, alias="ko_criteria")
question: Optional[str] = Field(None, alias="question")
field_values: Optional[Dict[str, Any]] = Field(None, alias="field_values")
class SelectionKOCriteriumScore(BaseModel):
criterium: Optional[str] = Field(None, alias="criterium")
answer: Optional[str] = Field(None, alias="answer")
score: Optional[int] = Field(None, alias="score")
class SelectionCompetencyScore(BaseModel):
competency: Optional[str] = Field(None, alias="competency")
answer: Optional[str] = Field(None, alias="answer")
score: Optional[int] = Field(None, alias="score")
class PersonalContactData(BaseModel):
name: str = Field(..., description="Your name", alias="name")
    email: EmailStr = Field(..., description="Your Email Address", alias="email")
phone: str = Field(..., description="Your Phone Number", alias="phone")
address: Optional[str] = Field(None, description="Your Address", alias="address")
zip: Optional[str] = Field(None, description="Postal Code", alias="zip")
city: Optional[str] = Field(None, description="City", alias="city")
country: Optional[str] = Field(None, description="Country", alias="country")
consent: bool = Field(..., description="Consent", alias="consent")
class SelectionResult(SpecialistResult):
ko_criteria_questions: Optional[List[ListItem]] = Field(None, alias="ko_criteria_questions")
ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores")
competency_questions: Optional[List[ListItem]] = Field(None, alias="competency_questions")
competency_scores: Optional[List[SelectionCompetencyScore]] = Field(None, alias="competency_scores")
personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data")
class SelectionFlowState(EveAIFlowState):
"""Flow state for Traicie Role Definition specialist that automatically updates from task outputs"""
input: Optional[SelectionInput] = None
ko_criteria_questions: Optional[List[KOQuestion]] = Field(None, alias="ko_criteria_questions")
ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores")
competency_questions: Optional[List[ListItem]] = Field(None, alias="competency_questions")
competency_scores: Optional[List[SelectionCompetencyScore]] = Field(None, alias="competency_scores")
personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data")
phase: Optional[str] = Field(None, alias="phase")
interaction_mode: Optional[str] = Field(None, alias="mode")
class SelectionFlow(EveAICrewAIFlow[SelectionFlowState]):
def __init__(self,
specialist_executor: CrewAIBaseSpecialistExecutor,
ko_def_crew: EveAICrewAICrew,
**kwargs):
super().__init__(specialist_executor, "Traicie Role Definition Specialist Flow", **kwargs)
self.specialist_executor = specialist_executor
self.ko_def_crew = ko_def_crew
self.exception_raised = False
@start()
def process_inputs(self):
return ""
@listen(process_inputs)
async def execute_ko_def_definition(self):
inputs = self.state.input.model_dump()
try:
current_app.logger.debug("execute_ko_interview_definition")
crew_output = await self.ko_def_crew.kickoff_async(inputs=inputs)
            # Unfortunately, crew_output will only contain the output of the latest task.
            # As we only take the flow state into account, we need to ensure both competencies and criteria
            # are copied to the flow state.
update = {}
for task in self.ko_def_crew.tasks:
current_app.logger.debug(f"Task {task.name} output:\n{task.output}")
if task.name == "traicie_ko_criteria_interview_definition_task":
# update["competencies"] = task.output.pydantic.competencies
self.state.ko_criteria_questions = task.output.pydantic.ko_questions
# crew_output.pydantic = crew_output.pydantic.model_copy(update=update)
self.state.phase = "personal_contact_data"
current_app.logger.debug(f"State after execute_ko_def_definition: {self.state}")
current_app.logger.debug(f"State dump after execute_ko_def_definition: {self.state.model_dump()}")
return crew_output
except Exception as e:
current_app.logger.error(f"CREW execute_ko_def Kickoff Error: {str(e)}")
self.exception_raised = True
raise e
async def kickoff_async(self, inputs=None):
current_app.logger.debug(f"Async kickoff {self.name}")
current_app.logger.debug(f"Inputs: {inputs}")
self.state.input = SelectionInput.model_validate(inputs)
current_app.logger.debug(f"State: {self.state}")
result = await super().kickoff_async(inputs)
return self.state

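The flow above is driven entirely through its typed state: `kickoff_async` validates the raw inputs into a `SelectionInput`, the crew's task outputs are copied into the `SelectionFlowState`, and callers read the state back instead of the raw crew output. A minimal sketch of that round trip, assuming the classes above are importable and a configured `SelectionFlow` instance is available; all input values are hypothetical:

```python
import asyncio

async def run_selection_flow(flow: SelectionFlow) -> SelectionFlowState:
    # kickoff_async validates this dict into SelectionInput and stores it on the state.
    state = await flow.kickoff_async(inputs={
        "region": "Antwerp",                  # hypothetical vacancy region
        "working_schedule": "full-time",
        "language": "nl",
        "interaction_mode": "chat",
        "ko_criteria": [{"title": "Driving licence B", "description": "Valid licence required"}],
    })
    # The generated KO questions live on the state, not in the raw crew output.
    for question in state.ko_criteria_questions or []:
        print(question.title, "->", question.question)
    return state

if __name__ == "__main__":
    flow = ...  # a configured SelectionFlow, built as in _instantiate_specialist
    asyncio.run(run_selection_flow(flow))
```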
View File

@@ -5,7 +5,7 @@ import os
from common.utils.celery_utils import make_celery, init_celery
from common.extensions import db, minio_client, cache_manager
-from config.logging_config import LOGGING
+from config.logging_config import configure_logging
from config.config import get_config
@@ -22,7 +22,7 @@ def create_app(config_file=None):
case _:
app.config.from_object(get_config('dev'))
-logging.config.dictConfig(LOGGING)
+configure_logging()
register_extensions(app)

View File

@@ -5,7 +5,7 @@ import os
from common.utils.celery_utils import make_celery, init_celery
from common.extensions import db, minio_client, cache_manager
-import config.logging_config as logging_config
+from config.logging_config import configure_logging
from config.config import get_config
@@ -22,7 +22,7 @@ def create_app(config_file=None):
case _:
app.config.from_object(get_config('dev'))
-logging.config.dictConfig(logging_config.LOGGING)
+configure_logging()
register_extensions(app)

2
logs/.gitkeep Normal file
View File

@@ -0,0 +1,2 @@
# This directory contains log files
# .gitkeep ensures the directory is included in Git

2
requirements-k8s.txt Normal file
View File

@@ -0,0 +1,2 @@
# Extra requirements for the Kubernetes environment
python-json-logger>=2.0.7

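The `python-json-logger` dependency above is what allows `configure_logging()` to emit JSON lines to stdout when running under Kubernetes. A minimal sketch of that pattern, assuming detection via the standard `KUBERNETES_SERVICE_HOST` environment variable; the actual implementation in `config/logging_config.py` may differ:

```python
import logging
import os
import sys

from pythonjsonlogger import jsonlogger

def configure_logging():
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    if os.environ.get("KUBERNETES_SERVICE_HOST"):
        # In Kubernetes: JSON lines to stdout, picked up by the cluster log collector.
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(jsonlogger.JsonFormatter(
            "%(asctime)s %(levelname)s %(name)s %(message)s"))
    else:
        # In development/test: keep traditional file-based logging.
        handler = logging.FileHandler("logs/eveai_app.log")
        handler.setFormatter(logging.Formatter(
            "%(asctime)s - %(levelname)s - %(name)s - %(message)s"))
    root.addHandler(handler)
```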
View File

@@ -93,3 +93,5 @@ prometheus_client~=0.21.1
scaleway~=2.9.0
html2text~=2025.4.15
markdown~=3.8
python-json-logger~=2.0.7
qrcode[pil]==8.2

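The `qrcode[pil]` pin above pulls in PIL support for rendering QR images. A minimal usage sketch (URL and filename are illustrative):

```python
import qrcode

# qrcode.make returns a PIL image when the [pil] extra is installed.
img = qrcode.make("https://example.com/magic-link/abc123")
img.save("magic_link_qr.png")
```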
102
scripts/check_logs.py Normal file
View File

@@ -0,0 +1,102 @@
#!/usr/bin/env python
"""
This script checks whether the logs directory exists and is accessible,
and tests whether logging works correctly.
"""
import os
import sys
import logging
import traceback
def check_logs_directory():
    # Get the absolute path to the logs directory
    base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    logs_dir = os.path.join(base_dir, 'logs')
    print(f"\nChecking logs directory: {logs_dir}")
    # Check whether the directory exists
    if not os.path.exists(logs_dir):
        print(" - Directory does not exist. Trying to create it...")
        try:
            os.makedirs(logs_dir, exist_ok=True)
            print(" - Directory created successfully.")
        except Exception as e:
            print(f" - ERROR: Cannot create directory: {e}")
            return False
    else:
        print(" - Directory exists.")
    # Check write permissions
    if not os.access(logs_dir, os.W_OK):
        print(" - ERROR: No write permission for the logs directory.")
        return False
    else:
        print(" - Directory is writable.")
    # Try writing a test file
    test_file = os.path.join(logs_dir, 'test_write.log')
    try:
        with open(test_file, 'w') as f:
            f.write('Test write to the logs directory.\n')
        print(f" - Successfully wrote test file to {test_file}")
        os.remove(test_file)  # Remove the test file
        print(" - Test file removed.")
    except Exception as e:
        print(f" - ERROR: Cannot write to the logs directory: {e}")
        return False
    return True
def check_logging_config():
    print("\nChecking logging configuration...")
    try:
        from config.logging_config import configure_logging
        configure_logging()
        print(" - Logging configuration loaded.")
        # Test a few loggers
        loggers_to_test = ['eveai_app', 'eveai_workers', 'eveai_api', 'tuning']
        for logger_name in loggers_to_test:
            logger = logging.getLogger(logger_name)
            logger.info(f"Test log message from {logger_name}")
            print(f" - Logger '{logger_name}' tested.")
        print(" - All loggers tested successfully.")
        return True
    except Exception as e:
        print(f" - ERROR loading logging configuration: {e}")
        traceback.print_exc()
        return False
def main():
    print("\nEveAI Logging Test Utility")
    print("===========================\n")
    directory_ok = check_logs_directory()
    if not directory_ok:
        print("\nPROBLEM: The logs directory is not accessible or writable.")
        print("Solutions:")
        print(" 1. Make sure the user running the application has write permission for the logs directory.")
        print(" 2. Run the command: mkdir -p logs && chmod 777 logs")
    config_ok = check_logging_config()
    if not config_ok:
        print("\nPROBLEM: The logging configuration could not be loaded.")
        print("Check the config/logging_config.py file.")
    if directory_ok and config_ok:
        print("\nALL OK: Logging appears to be configured correctly.")
        print("Check the log files in the 'logs' directory for the test messages.")
    else:
        print("\nProblems were found that need to be resolved.")
        return 1
    return 0
if __name__ == "__main__":
sys.exit(main())