- Added usage tracking to specialist execution

- Corrected the usage implementation
- Removed obsolete debug statements
Josako
2025-03-07 11:10:28 +01:00
parent efff63043a
commit 5bfd3445bb
21 changed files with 215 additions and 255 deletions
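The changes below replace the per-call database writes in BusinessEvent with an in-memory buffer that is flushed to a 'persist_business_events' Celery task when the trace ends. A minimal usage sketch of that flow follows; the BusinessEvent constructor arguments and the import path are assumptions, since neither appears in this diff:

# Illustrative sketch only; constructor arguments and module path are assumed, not part of this commit.
from common.utils.business_event import BusinessEvent

with BusinessEvent('Execute Specialist', tenant_id=42) as event:
    with event.create_span('Specialist Retrieval'):
        # log_llm_metrics() now appends a dict to event._log_buffer instead of
        # committing a BusinessEventLog row immediately
        event.log_llm_metrics({
            'total_tokens': 1200,
            'prompt_tokens': 900,
            'completion_tokens': 300,
            'time_elapsed': 1.8,
            'interaction_type': 'Crew Execution',
        })
# On __exit__, _flush_log_buffer() sends the buffered entries to the
# 'persist_business_events' task on the 'entitlements' queue; if the Celery
# send fails, _direct_db_persist() bulk-inserts them as a fallback.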

View File

@@ -28,7 +28,6 @@ class TemplateManager:
        # Initialize template manager
        base_dir = "/app"
        self.templates_dir = os.path.join(base_dir, 'config', 'prompts')
-        app.logger.debug(f'Loading templates from {self.templates_dir}')
        self.app = app
        self._templates = self._load_templates()
        # Log available templates for each supported model

View File

@@ -11,7 +11,7 @@ class BusinessEventLog(db.Model):
    tenant_id = db.Column(db.Integer, nullable=False)
    trace_id = db.Column(db.String(50), nullable=False)
    span_id = db.Column(db.String(50))
-    span_name = db.Column(db.String(50))
+    span_name = db.Column(db.String(255))
    parent_span_id = db.Column(db.String(50))
    document_version_id = db.Column(db.Integer)
    document_version_file_size = db.Column(db.Float)

View File

@@ -2,13 +2,14 @@ import os
import uuid
from contextlib import contextmanager
from datetime import datetime
-from typing import Dict, Any, Optional
+from typing import Dict, Any, Optional, List
from datetime import datetime as dt, timezone as tz
import logging

from .business_event_context import BusinessEventContext
from common.models.entitlements import BusinessEventLog
from common.extensions import db
+from .celery_utils import current_celery


class BusinessEvent:
@@ -38,6 +39,7 @@ class BusinessEvent:
            'call_count': 0,
            'interaction_type': None
        }
+        self._log_buffer = []

    def update_attribute(self, attribute: str, value: any):
        if hasattr(self, attribute):
@@ -80,7 +82,7 @@ class BusinessEvent:
        self.span_name = span_name
        self.parent_span_id = parent_span_id
-        self.log(f"Starting span {span_name}")
+        self.log(f"Start")
        try:
            yield
@@ -88,7 +90,7 @@ class BusinessEvent:
            if self.llm_metrics['call_count'] > 0:
                self.log_final_metrics()
                self.reset_llm_metrics()
-            self.log(f"Ending span {span_name}")
+            self.log(f"End")
            # Restore the previous span info
            if self.spans:
                self.span_id, self.span_name, self.parent_span_id = self.spans.pop()
@@ -98,8 +100,8 @@ class BusinessEvent:
            self.parent_span_id = None

    def log(self, message: str, level: str = 'info'):
-        logger = logging.getLogger('business_events')
        log_data = {
+            'timestamp': dt.now(tz=tz.utc),
            'event_type': self.event_type,
            'tenant_id': self.tenant_id,
            'trace_id': self.trace_id,
@@ -111,34 +113,16 @@ class BusinessEvent:
            'chat_session_id': self.chat_session_id,
            'interaction_id': self.interaction_id,
            'environment': self.environment,
+            'message': message,
        }
-        # log to Graylog
-        getattr(logger, level)(message, extra=log_data)
-        # Log to database
-        event_log = BusinessEventLog(
-            timestamp=dt.now(tz=tz.utc),
-            event_type=self.event_type,
-            tenant_id=self.tenant_id,
-            trace_id=self.trace_id,
-            span_id=self.span_id,
-            span_name=self.span_name,
-            parent_span_id=self.parent_span_id,
-            document_version_id=self.document_version_id,
-            document_version_file_size=self.document_version_file_size,
-            chat_session_id=self.chat_session_id,
-            interaction_id=self.interaction_id,
-            environment=self.environment,
-            message=message
-        )
-        db.session.add(event_log)
-        db.session.commit()
+        self._log_buffer.append(log_data)

    def log_llm_metrics(self, metrics: dict, level: str = 'info'):
        self.update_llm_metrics(metrics)
        message = "LLM Metrics"
        logger = logging.getLogger('business_events')
        log_data = {
+            'timestamp': dt.now(tz=tz.utc),
            'event_type': self.event_type,
            'tenant_id': self.tenant_id,
            'trace_id': self.trace_id,
@@ -155,38 +139,15 @@ class BusinessEvent:
            'llm_metrics_completion_tokens': metrics['completion_tokens'],
            'llm_metrics_total_time': metrics['time_elapsed'],
            'llm_interaction_type': metrics['interaction_type'],
+            'message': message,
        }
-        # log to Graylog
-        getattr(logger, level)(message, extra=log_data)
-        # Log to database
-        event_log = BusinessEventLog(
-            timestamp=dt.now(tz=tz.utc),
-            event_type=self.event_type,
-            tenant_id=self.tenant_id,
-            trace_id=self.trace_id,
-            span_id=self.span_id,
-            span_name=self.span_name,
-            parent_span_id=self.parent_span_id,
-            document_version_id=self.document_version_id,
-            document_version_file_size=self.document_version_file_size,
-            chat_session_id=self.chat_session_id,
-            interaction_id=self.interaction_id,
-            environment=self.environment,
-            llm_metrics_total_tokens=metrics['total_tokens'],
-            llm_metrics_prompt_tokens=metrics['prompt_tokens'],
-            llm_metrics_completion_tokens=metrics['completion_tokens'],
-            llm_metrics_total_time=metrics['time_elapsed'],
-            llm_interaction_type=metrics['interaction_type'],
-            message=message
-        )
-        db.session.add(event_log)
-        db.session.commit()
+        self._log_buffer.append(log_data)

    def log_final_metrics(self, level: str = 'info'):
        logger = logging.getLogger('business_events')
        message = "Final LLM Metrics"
        log_data = {
+            'timestamp': dt.now(tz=tz.utc),
            'event_type': self.event_type,
            'tenant_id': self.tenant_id,
            'trace_id': self.trace_id,
@@ -204,34 +165,65 @@ class BusinessEvent:
            'llm_metrics_total_time': self.llm_metrics['total_time'],
            'llm_metrics_call_count': self.llm_metrics['call_count'],
            'llm_interaction_type': self.llm_metrics['interaction_type'],
+            'message': message,
        }
-        # log to Graylog
-        getattr(logger, level)(message, extra=log_data)
-        # Log to database
-        event_log = BusinessEventLog(
-            timestamp=dt.now(tz=tz.utc),
-            event_type=self.event_type,
-            tenant_id=self.tenant_id,
-            trace_id=self.trace_id,
-            span_id=self.span_id,
-            span_name=self.span_name,
-            parent_span_id=self.parent_span_id,
-            document_version_id=self.document_version_id,
-            document_version_file_size=self.document_version_file_size,
-            chat_session_id=self.chat_session_id,
-            interaction_id=self.interaction_id,
-            environment=self.environment,
-            llm_metrics_total_tokens=self.llm_metrics['total_tokens'],
-            llm_metrics_prompt_tokens=self.llm_metrics['prompt_tokens'],
-            llm_metrics_completion_tokens=self.llm_metrics['completion_tokens'],
-            llm_metrics_total_time=self.llm_metrics['total_time'],
-            llm_metrics_call_count=self.llm_metrics['call_count'],
-            llm_interaction_type=self.llm_metrics['interaction_type'],
-            message=message
-        )
-        db.session.add(event_log)
-        db.session.commit()
+        self._log_buffer.append(log_data)
+
+    @staticmethod
+    def _direct_db_persist(log_entries: List[Dict[str, Any]]):
+        """Fallback method to directly persist logs to DB if async fails"""
+        try:
+            db_entries = []
+            for entry in log_entries:
+                event_log = BusinessEventLog(
+                    timestamp=entry.pop('timestamp'),
+                    event_type=entry.pop('event_type'),
+                    tenant_id=entry.pop('tenant_id'),
+                    trace_id=entry.pop('trace_id'),
+                    span_id=entry.pop('span_id', None),
+                    span_name=entry.pop('span_name', None),
+                    parent_span_id=entry.pop('parent_span_id', None),
+                    document_version_id=entry.pop('document_version_id', None),
+                    document_version_file_size=entry.pop('document_version_file_size', None),
+                    chat_session_id=entry.pop('chat_session_id', None),
+                    interaction_id=entry.pop('interaction_id', None),
+                    environment=entry.pop('environment', None),
+                    llm_metrics_total_tokens=entry.pop('llm_metrics_total_tokens', None),
+                    llm_metrics_prompt_tokens=entry.pop('llm_metrics_prompt_tokens', None),
+                    llm_metrics_completion_tokens=entry.pop('llm_metrics_completion_tokens', None),
+                    llm_metrics_total_time=entry.pop('llm_metrics_total_time', None),
+                    llm_metrics_call_count=entry.pop('llm_metrics_call_count', None),
+                    llm_interaction_type=entry.pop('llm_interaction_type', None),
+                    message=entry.pop('message', None)
+                )
+                db_entries.append(event_log)
+            # Bulk insert
+            db.session.bulk_save_objects(db_entries)
+            db.session.commit()
+        except Exception as e:
+            logger = logging.getLogger('business_events')
+            logger.error(f"Failed to persist logs directly to DB: {e}")
+            db.session.rollback()
+
+    def _flush_log_buffer(self):
+        """Flush the log buffer to the database via a Celery task"""
+        if self._log_buffer:
+            try:
+                # Send to Celery task
+                current_celery.send_task(
+                    'persist_business_events',
+                    args=[self._log_buffer],
+                    queue='entitlements'  # Or dedicated log queue
+                )
+            except Exception as e:
+                # Fallback to direct DB write in case of issues with Celery
+                logger = logging.getLogger('business_events')
+                logger.error(f"Failed to send logs to Celery. Falling back to direct DB: {e}")
+                self._direct_db_persist(self._log_buffer)
+            # Clear the buffer after sending
+            self._log_buffer = []

    def __enter__(self):
        self.log(f'Starting Trace for {self.event_type}')
@@ -242,4 +234,5 @@ class BusinessEvent:
            self.log_final_metrics()
            self.reset_llm_metrics()
        self.log(f'Ending Trace for {self.event_type}')
+        self._flush_log_buffer()
        return BusinessEventContext(self).__exit__(exc_type, exc_val, exc_tb)

View File

@@ -120,9 +120,6 @@ class CacheHandler(Generic[T]):
        region_name = getattr(self.region, 'name', 'default_region')
-        current_app.logger.debug(f"Generating cache key in region {region_name} with prefix {self.prefix} "
-                                 f"for {self._key_components}")
        key = CacheKey({k: identifiers[k] for k in self._key_components})
        return f"{region_name}_{self.prefix}:{str(key)}"
@@ -138,13 +135,10 @@ class CacheHandler(Generic[T]):
            Cached or newly created value
        """
        cache_key = self.generate_key(**identifiers)
-        current_app.logger.debug(f"Getting Cache key: {cache_key}")

        def creator():
            instance = creator_func(**identifiers)
-            current_app.logger.debug("Caching object created and received. Now serializing...")
            serialized_instance = self._to_cache_data(instance)
-            current_app.logger.debug(f"Caching object serialized and received:\n{serialized_instance}")
            return serialized_instance

        cached_data = self.region.get_or_create(

View File

@@ -69,10 +69,8 @@ class BaseConfigCacheHandler(CacheHandler[Dict[str, Any]]):
        Returns:
            Configuration data
        """
-        current_app.logger.debug(f"Loading specific configuration for {type_name}, version: {version_str} - no cache")
        version_tree = self.version_tree_cache.get_versions(type_name)
        versions = version_tree['versions']
-        current_app.logger.debug(f"Loaded specific versions for {type_name}, versions: {versions}")

        if version_str == 'latest':
            version_str = version_tree['latest_version']
@@ -81,12 +79,10 @@ class BaseConfigCacheHandler(CacheHandler[Dict[str, Any]]):
raise ValueError(f"Version {version_str} not found for {type_name}") raise ValueError(f"Version {version_str} not found for {type_name}")
file_path = versions[version_str]['file_path'] file_path = versions[version_str]['file_path']
current_app.logger.debug(f'Trying to load configuration from {file_path}')
try: try:
with open(file_path) as f: with open(file_path) as f:
config = yaml.safe_load(f) config = yaml.safe_load(f)
current_app.logger.debug(f"Loaded config for {type_name}{version_str}: {config}")
return config return config
except Exception as e: except Exception as e:
raise ValueError(f"Error loading config from {file_path}: {e}") raise ValueError(f"Error loading config from {file_path}: {e}")
@@ -103,8 +99,6 @@ class BaseConfigCacheHandler(CacheHandler[Dict[str, Any]]):
        Returns:
            Configuration data
        """
-        current_app.logger.debug(f"Trying to retrieve config for {self.config_type}, type name: {type_name}, "
-                                 f"version: {version}")
        if version is None:
            version_str = self.version_tree_cache.get_latest_version(type_name)
        elif is_major_minor(version):
@@ -145,7 +139,6 @@ class BaseConfigVersionTreeCacheHandler(CacheHandler[Dict[str, Any]]):
        Returns:
            Dict containing available versions and their metadata
        """
-        current_app.logger.debug(f"Loading version tree for {type_name} - no cache")
        type_path = Path(self._config_dir) / type_name
        if not type_path.exists():
            raise ValueError(f"No configuration found for type {type_name}")
@@ -180,8 +173,6 @@ class BaseConfigVersionTreeCacheHandler(CacheHandler[Dict[str, Any]]):
current_app.logger.error(f"Error loading version {ver}: {e}") current_app.logger.error(f"Error loading version {ver}: {e}")
continue continue
current_app.logger.debug(f"Loaded versions for {type_name}: {versions}")
current_app.logger.debug(f"Latest version for {type_name}: {latest_version}")
return { return {
'versions': versions, 'versions': versions,
'latest_version': latest_version 'latest_version': latest_version
@@ -221,7 +212,6 @@ class BaseConfigVersionTreeCacheHandler(CacheHandler[Dict[str, Any]]):
        Returns:
            Dict with version information
        """
-        current_app.logger.debug(f"Trying to get version tree for {self.config_type}, {type_name}")
        return self.get(
            lambda type_name: self._load_version_tree(type_name),
            type_name=type_name
@@ -319,7 +309,6 @@ class BaseConfigTypesCacheHandler(CacheHandler[Dict[str, Any]]):
    def _load_type_definitions(self) -> Dict[str, Dict[str, str]]:
        """Load type definitions from the corresponding type_defs module"""
-        current_app.logger.debug(f"Loading type definitions for {self.config_type} - no cache")
        if not self._types_module:
            raise ValueError("_types_module must be set by subclass")
@@ -331,13 +320,10 @@ class BaseConfigTypesCacheHandler(CacheHandler[Dict[str, Any]]):
            for type_id, info in self._types_module.items()
        }
-        current_app.logger.debug(f"Loaded type definitions: {type_definitions}")
        return type_definitions

    def get_types(self) -> Dict[str, Dict[str, str]]:
        """Get dictionary of available types with name and description"""
-        current_app.logger.debug(f"Trying to retrieve type definitions for {self.config_type}")
        result = self.get(
            lambda type_name: self._load_type_definitions(),
            type_name=f'{self.config_type}_types',

View File

@@ -211,8 +211,6 @@ class BaseCrewAIConfigProcessor:
                tasks=self._process_task_configs(specialist_config),
                tools=self._process_tool_configs(specialist_config)
            )
-            current_app.logger.debug(f"Processed config for tenant {self.tenant_id}, specialist {self.specialist_id}:\n"
-                                     f"{processed_config}")
            return processed_config

        except Exception as e:

View File

@@ -58,39 +58,6 @@ def init_celery(celery, app, is_beat=False):
    celery.Task = ContextTask

-# Original init_celery before updating for beat
-# def init_celery(celery, app):
-#     celery_app.main = app.name
-#     app.logger.debug(f'CELERY_BROKER_URL: {app.config["CELERY_BROKER_URL"]}')
-#     app.logger.debug(f'CELERY_RESULT_BACKEND: {app.config["CELERY_RESULT_BACKEND"]}')
-#     celery_config = {
-#         'broker_url': app.config.get('CELERY_BROKER_URL', 'redis://localhost:6379/0'),
-#         'result_backend': app.config.get('CELERY_RESULT_BACKEND', 'redis://localhost:6379/0'),
-#         'task_serializer': app.config.get('CELERY_TASK_SERIALIZER', 'json'),
-#         'result_serializer': app.config.get('CELERY_RESULT_SERIALIZER', 'json'),
-#         'accept_content': app.config.get('CELERY_ACCEPT_CONTENT', ['json']),
-#         'timezone': app.config.get('CELERY_TIMEZONE', 'UTC'),
-#         'enable_utc': app.config.get('CELERY_ENABLE_UTC', True),
-#         'task_routes': {'eveai_worker.tasks.create_embeddings': {'queue': 'embeddings',
-#                                                                   'routing_key': 'embeddings.create_embeddings'}},
-#     }
-#     celery_app.conf.update(**celery_config)
-#
-#     # Setting up Celery task queues
-#     celery_app.conf.task_queues = (
-#         Queue('default', routing_key='task.#'),
-#         Queue('embeddings', routing_key='embeddings.#', queue_arguments={'x-max-priority': 10}),
-#         Queue('llm_interactions', routing_key='llm_interactions.#', queue_arguments={'x-max-priority': 5}),
-#     )
-#
-#     # Ensuring tasks execute with Flask application context
-#     class ContextTask(celery.Task):
-#         def __call__(self, *args, **kwargs):
-#             with app.app_context():
-#                 return self.run(*args, **kwargs)
-#
-#     celery.Task = ContextTask


def make_celery(app_name, config):
    return celery_app

View File

@@ -5,58 +5,11 @@ import json
def log_request_middleware(app):
-    # @app.before_request
-    # def log_request_info():
-    #     start_time = time.time()
-    #     app.logger.debug(f"Request URL: {request.url}")
-    #     app.logger.debug(f"Request Method: {request.method}")
-    #     app.logger.debug(f"Request Headers: {request.headers}")
-    #     app.logger.debug(f"Time taken for logging request info: {time.time() - start_time} seconds")
-    #     try:
-    #         app.logger.debug(f"Request Body: {request.get_data()}")
-    #     except Exception as e:
-    #         app.logger.error(f"Error reading request body: {e}")
-    #     app.logger.debug(f"Time taken for logging request body: {time.time() - start_time} seconds")
-    # @app.before_request
-    # def check_csrf_token():
-    #     start_time = time.time()
-    #     if request.method == "POST":
-    #         csrf_token = request.form.get("csrf_token")
-    #         app.logger.debug(f"CSRF Token: {csrf_token}")
-    #     app.logger.debug(f"Time taken for logging CSRF token: {time.time() - start_time} seconds")
-    # @app.before_request
-    # def log_user_info():
-    #     if current_user and current_user.is_authenticated:
-    #         app.logger.debug(f"Before: User ID: {current_user.id}")
-    #         app.logger.debug(f"Before: User Email: {current_user.email}")
-    #         app.logger.debug(f"Before: User Roles: {current_user.roles}")
-    #     else:
-    #         app.logger.debug("After: No user logged in")
    @app.before_request
    def log_session_state_before():
        pass

-    # @app.after_request
-    # def log_response_info(response):
-    #     start_time = time.time()
-    #     app.logger.debug(f"Response Status: {response.status}")
-    #     app.logger.debug(f"Response Headers: {response.headers}")
-    #
-    #     app.logger.debug(f"Time taken for logging response info: {time.time() - start_time} seconds")
-    #     return response
-    # @app.after_request
-    # def log_user_after_request(response):
-    #     if current_user and current_user.is_authenticated:
-    #         app.logger.debug(f"After: User ID: {current_user.id}")
-    #         app.logger.debug(f"after: User Email: {current_user.email}")
-    #         app.logger.debug(f"After: User Roles: {current_user.roles}")
-    #     else:
-    #         app.logger.debug("After: No user logged in")
    @app.after_request
    def log_session_state_after(response):
        return response
@@ -149,8 +102,3 @@ def register_request_debugger(app):
        # Format the debug info as a pretty-printed JSON string with indentation
        formatted_debug_info = json.dumps(debug_info, indent=2, sort_keys=True)
-        # Log everything in a single statement
-        app.logger.debug(
-            "Request Debug Information\n",
-            extra={"request_debug\n": formatted_debug_info}
-        )

View File

@@ -367,7 +367,6 @@ def mark_tenant_storage_dirty(tenant_id):
def cope_with_local_url(url):
-    current_app.logger.debug(f'Incomming URL: {url}')
    parsed_url = urlparse(url)
    # Check if this is an internal WordPress URL (TESTING) and rewrite it
    if parsed_url.netloc in [current_app.config['EXTERNAL_WORDPRESS_BASE_URL']]:
@@ -376,7 +375,6 @@ def cope_with_local_url(url):
netloc=f"{current_app.config['WORDPRESS_HOST']}:{current_app.config['WORDPRESS_PORT']}" netloc=f"{current_app.config['WORDPRESS_HOST']}:{current_app.config['WORDPRESS_PORT']}"
) )
url = urlunparse(parsed_url) url = urlunparse(parsed_url)
current_app.logger.debug(f'Translated Wordpress URL to: {url}')
return url return url
@@ -412,10 +410,6 @@ def lookup_document(tenant_id: int, lookup_criteria: dict, metadata_type: str) -
    for key, value in lookup_criteria.items():
        query = query.filter(metadata_field[key].astext == str(value))

-    # Log the final SQL query
-    current_app.logger.debug(
-        f"Final SQL query: {query.statement.compile(compile_kwargs={'literal_binds': True})}")

    # Get first result
    result = query.first()

View File

@@ -99,13 +99,11 @@ def _create_agent(
        timestamp: Optional[dt] = None
) -> EveAIAgent:
    """Create an agent with the given configuration."""
-    current_app.logger.debug(f"Creating agent {agent_type} {agent_version} with {name}, {description}")
    if timestamp is None:
        timestamp = dt.now(tz=tz.utc)

    # Get agent configuration from cache
    agent_config = cache_manager.agents_config_cache.get_config(agent_type, agent_version)
-    current_app.logger.debug(f"Agent Config: {agent_config}")

    agent = EveAIAgent(
        specialist_id=specialist_id,
@@ -142,7 +140,6 @@ def _create_task(
    # Get task configuration from cache
    task_config = cache_manager.tasks_config_cache.get_config(task_type, task_version)
-    current_app.logger.debug(f"Task Config: {task_config}")

    task = EveAITask(
        specialist_id=specialist_id,
@@ -180,7 +177,6 @@ def _create_tool(
    # Get tool configuration from cache
    tool_config = cache_manager.tools_config_cache.get_config(tool_type, tool_version)
-    current_app.logger.debug(f"Tool Config: {tool_config}")

    tool = EveAITool(
        specialist_id=specialist_id,

View File

@@ -28,15 +28,10 @@ def perform_startup_invalidation(app):
    try:
        # Check if invalidation was already performed
        if not redis_client.get(marker_key):
-            app.logger.debug(f"Performing cache invalidation at startup time {startup_time}")
-            app.logger.debug(f"Current cache keys: {redis_client.keys('*')}")
            # Perform invalidation
            cache_manager.invalidate_region('eveai_config')
            cache_manager.invalidate_region('eveai_chat_workers')
-            app.logger.debug(f"Cache keys after invalidation: {redis_client.keys('*')}")
            redis_client.setex(marker_key, 180, str(startup_time))
            app.logger.info("Startup cache invalidation completed")
        else:

View File

@@ -66,7 +66,6 @@ def create_app(config_file=None):
    @app.before_request
    def check_cors():
        if request.method == 'OPTIONS':
-            app.logger.debug("Handling OPTIONS request")
            return '', 200  # Allow OPTIONS to pass through

        origin = request.headers.get('Origin')

View File

@@ -83,7 +83,6 @@ class Token(Resource):
            expires_delta=expires_delta,
            additional_claims=additional_claims
        )
-        current_app.logger.debug(f"Created token: {access_token}")
        return {
            'access_token': access_token,
            'expires_in': expires_delta.total_seconds()
@@ -164,10 +163,6 @@ class Services(Resource):
""" """
Get allowed services for the current token Get allowed services for the current token
""" """
# Log the incoming authorization header
auth_header = request.headers.get('Authorization')
current_app.logger.debug(f"Received Authorization header: {auth_header}")
claims = get_jwt() claims = get_jwt()
tenant_id = get_jwt_identity() tenant_id = get_jwt_identity()

View File

@@ -345,7 +345,6 @@ class DocumentResource(Resource):
    def put(self, document_id):
        """Edit a document. The content of the document will not be refreshed!"""
        try:
-            current_app.logger.debug(f'Editing document {document_id}')
            data = request.json
            tenant_id = get_jwt_identity()
            updated_doc, error = edit_document(tenant_id, document_id, data.get('name', None),

View File

@@ -118,7 +118,6 @@ class SpecialistArgument(Resource):
        if specialist:
            configuration = cache_manager.specialists_config_cache.get_config(specialist.type,
                                                                              specialist.type_version)
-            current_app.logger.debug(f"Configuration returned: {configuration}")
            if configuration:
                if 'arguments' in configuration:
                    return {

View File

@@ -36,9 +36,7 @@ def log_after_request(response):
@roles_required('Super User')
def tenant():
    form = TenantForm()
-    current_app.logger.debug(f'Tenant form: {form}')
    if form.validate_on_submit():
-        current_app.logger.debug(f'Tenant form submitted: {form.data}')
        # Handle the required attributes
        new_tenant = Tenant()
        form.populate_obj(new_tenant)

View File

@@ -198,7 +198,13 @@ class SPINFlowState(EveAIFlowState):
class SPINFlow(EveAICrewAIFlow[SPINFlowState]):
-    def __init__(self, specialist_executor, rag_crew, spin_crew, identification_crew, rag_consolidation_crew, **kwargs):
+    def __init__(self,
+                 specialist_executor: CrewAIBaseSpecialistExecutor,
+                 rag_crew: EveAICrewAICrew,
+                 spin_crew: EveAICrewAICrew,
+                 identification_crew: EveAICrewAICrew,
+                 rag_consolidation_crew: EveAICrewAICrew,
+                 **kwargs):
        super().__init__(specialist_executor, "SPIN Specialist Flow", **kwargs)
        self.specialist_executor = specialist_executor
        self.rag_crew = rag_crew

View File

@@ -1,12 +1,16 @@
import json
+import time

from crewai import Agent, Task, Crew, Flow
from crewai.agents.parser import AgentAction, AgentFinish
+from crewai.crews import CrewOutput
from crewai.tools import BaseTool
from flask import current_app
from pydantic import BaseModel, create_model, Field, ConfigDict
from typing import Dict, Type, get_type_hints, Optional, List, Any, Callable

+from common.utils.business_event_context import current_event


class EveAICrewAIAgent(Agent):
    specialist: Any = Field(default=None, exclude=True)
@@ -36,26 +40,27 @@ class EveAICrewAIAgent(Agent):
        Returns:
            Output of the agent
        """
-        self.specialist.log_tuning("EveAI Agent Task Start",
-                                   {"name": self.name,
-                                    'task': task.name,
-                                    })
-        self.specialist.update_progress("EveAI Agent Task Start",
-                                        {"name": self.name,
-                                         'task': task.name,
-                                         })
-        result = super().execute_task(task, context, tools)
-        self.specialist.log_tuning("EveAI Agent Task Complete",
-                                   {"name": self.name,
-                                    'task': task.name,
-                                    'result': result,
-                                    })
-        self.specialist.update_progress("EveAI Agent Task Complete",
-                                        {"name": self.name,
-                                         'task': task.name,
-                                         })
-        return result
+        with current_event.create_span(f"Task Execution {task.name} by {self.name}"):
+            self.specialist.log_tuning("EveAI Agent Task Start",
+                                       {"name": self.name,
+                                        'task': task.name,
+                                        })
+            self.specialist.update_progress("EveAI Agent Task Start",
+                                            {"name": self.name,
+                                             'task': task.name,
+                                             })
+            result = super().execute_task(task, context, tools)
+            self.specialist.log_tuning("EveAI Agent Task Complete",
+                                       {"name": self.name,
+                                        'task': task.name,
+                                        'result': result,
+                                        })
+            self.specialist.update_progress("EveAI Agent Task Complete",
+                                            {"name": self.name,
+                                             'task': task.name,
+                                             })
+            return result
@@ -75,26 +80,6 @@ class EveAICrewAITask(Task):
self.specialist.update_progress("EveAI Task Initialisation", {"name": name}) self.specialist.update_progress("EveAI Task Initialisation", {"name": name})
# def create_task_callback(task: EveAICrewAITask):
# def task_callback(output):
# # Todo Check if required with new version of crewai
# if isinstance(output, BaseModel):
# task.specialist.log_tuning(f"TASK CALLBACK: EveAICrewAITask {task.name} Output:",
# {'output': output.model_dump()})
# if output.output_format == "pydantic" and not output.pydantic:
# try:
# raw_json = json.loads(output.raw)
# output_pydantic = task.output_pydantic(**raw_json)
# output.pydantic = output_pydantic
# task.specialist.log_tuning(f"TASK CALLBACK: EveAICrewAITask {task.name} Converted Output",
# {'output': output_pydantic.model_dump()})
# except Exception as e:
# task.specialist.log_tuning(f"TASK CALLBACK: EveAICrewAITask {task.name} Output Conversion Error: "
# f"{str(e)}", {})
#
# return task_callback
class EveAICrewAICrew(Crew): class EveAICrewAICrew(Crew):
specialist: Any = Field(default=None, exclude=True) specialist: Any = Field(default=None, exclude=True)
name: str = Field(default=None, exclude=True) name: str = Field(default=None, exclude=True)
@@ -107,6 +92,24 @@ class EveAICrewAICrew(Crew):
self.specialist.log_tuning("Initializing EveAICrewAICrew", {"name": self.name}) self.specialist.log_tuning("Initializing EveAICrewAICrew", {"name": self.name})
self.specialist.update_progress("EveAI Crew Initialisation", {"name": self.name}) self.specialist.update_progress("EveAI Crew Initialisation", {"name": self.name})
def kickoff(
self,
inputs: Optional[Dict[str, Any]] = None,
) -> CrewOutput:
with current_event.create_span(f"Crew {self.name} kickoff"):
start_time = time.time()
results = super().kickoff(inputs)
end_time = time.time()
metrics = {
"total_tokens": self.usage_metrics.total_tokens,
"prompt_tokens": self.usage_metrics.prompt_tokens,
"completion_tokens": self.usage_metrics.completion_tokens,
"time_elapsed": end_time - start_time,
"interaction_type": "Crew Execution"
}
current_event.log_llm_metrics(metrics)
return results
class EveAICrewAIFlow(Flow): class EveAICrewAIFlow(Flow):
specialist: Any = Field(default=None, exclude=True) specialist: Any = Field(default=None, exclude=True)

View File

@@ -244,11 +244,6 @@ def execute_specialist(self, tenant_id: int, specialist_id: int, arguments: Dict
            session_id,
            create_params={'timezone': user_timezone}
        )
-        if cached_session:
-            current_app.logger.debug(f"Cached Session successfully retrieved for {session_id}: {cached_session.id}")
-        else:
-            current_app.logger.debug(f"No Cached Session retrieved for {session_id}")

        # Get specialist from database
        specialist = Specialist.query.get_or_404(specialist_id)

View File

@@ -30,6 +30,8 @@ def update_usages():
    error_list = []
    for tenant_id in tenant_ids:
+        if tenant_id == 1:
+            continue
        try:
            Database(tenant_id).switch_schema()
            check_and_create_license_usage_for_tenant(tenant_id)
@@ -65,6 +67,50 @@ def update_usages():
return "Update Usages taks completed successfully" return "Update Usages taks completed successfully"
@current_celery.task(name='persist_business_events', queue='entitlements')
def persist_business_events(log_entries):
"""
Persist multiple business event logs to the database in a single transaction
Args:
log_entries: List of log event dictionaries to persist
"""
try:
db_entries = []
for entry in log_entries:
event_log = BusinessEventLog(
timestamp=entry.pop('timestamp'),
event_type=entry.pop('event_type'),
tenant_id=entry.pop('tenant_id'),
trace_id=entry.pop('trace_id'),
span_id=entry.pop('span_id', None),
span_name=entry.pop('span_name', None),
parent_span_id=entry.pop('parent_span_id', None),
document_version_id=entry.pop('document_version_id', None),
document_version_file_size=entry.pop('document_version_file_size', None),
chat_session_id=entry.pop('chat_session_id', None),
interaction_id=entry.pop('interaction_id', None),
environment=entry.pop('environment', None),
llm_metrics_total_tokens=entry.pop('llm_metrics_total_tokens', None),
llm_metrics_prompt_tokens=entry.pop('llm_metrics_prompt_tokens', None),
llm_metrics_completion_tokens=entry.pop('llm_metrics_completion_tokens', None),
llm_metrics_total_time=entry.pop('llm_metrics_total_time', None),
llm_metrics_call_count=entry.pop('llm_metrics_call_count', None),
llm_interaction_type=entry.pop('llm_interaction_type', None),
message=entry.pop('message', None)
)
db_entries.append(event_log)
# Perform a bulk insert of all entries
db.session.bulk_save_objects(db_entries)
db.session.commit()
current_app.logger.info(f"Successfully persisted {len(db_entries)} business event logs")
except Exception as e:
current_app.logger.error(f"Failed to persist business event logs: {e}")
db.session.rollback()
def get_all_tenant_ids(): def get_all_tenant_ids():
tenant_ids = db.session.query(Tenant.id).all() tenant_ids = db.session.query(Tenant.id).all()
return [tenant_id[0] for tenant_id in tenant_ids] # Extract tenant_id from tuples return [tenant_id[0] for tenant_id in tenant_ids] # Extract tenant_id from tuples
@@ -193,6 +239,18 @@ def process_logs_for_license_usage(tenant_id, license_usage_id, logs):
                    interaction_completion_tokens_used += log.llm_metrics_completion_tokens
                    interaction_total_tokens_used += log.llm_metrics_total_tokens

+            # Case for 'Specialist Execution' event
+            elif log.event_type == 'Execute Specialist':
+                if log.message == 'Final LLM Metrics':
+                    if log.span_name == 'Specialist Retrieval':  # This is embedding
+                        embedding_prompt_tokens_used += log.llm_metrics_prompt_tokens
+                        embedding_completion_tokens_used += log.llm_metrics_completion_tokens
+                        embedding_total_tokens_used += log.llm_metrics_total_tokens
+                    else:  # This is an interaction
+                        interaction_prompt_tokens_used += log.llm_metrics_prompt_tokens
+                        interaction_completion_tokens_used += log.llm_metrics_completion_tokens
+                        interaction_total_tokens_used += log.llm_metrics_total_tokens

            # Mark the log as processed by setting the license_usage_id
            log.license_usage_id = license_usage_id

View File

@@ -0,0 +1,38 @@
"""Enlarging span_name field for business events
Revision ID: 4d2842d9c1d0
Revises: b02d9ad000f4
Create Date: 2025-03-06 14:27:37.986152
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4d2842d9c1d0'
down_revision = 'b02d9ad000f4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('business_event_log', schema=None) as batch_op:
batch_op.alter_column('span_name',
existing_type=sa.VARCHAR(length=50),
type_=sa.String(length=255),
existing_nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('business_event_log', schema=None) as batch_op:
batch_op.alter_column('span_name',
existing_type=sa.String(length=255),
type_=sa.VARCHAR(length=50),
existing_nullable=True)
# ### end Alembic commands ###