- Move global config files to a `globals` folder instead of `global`, as the name `global` conflicts with the Python `global` keyword
- Creation of Traicie Vacancy Definition specialist - Allow invoking non-interactive specialists from within Evie's management interface (eveai_app) - Improvements to CrewAI specialized classes - Introduction of a JSON editor for displaying specialist arguments and results in a better way - Introduction of more complex pagination (adding extra arguments) via a global 'get_pagination_html' - Allow follow-up of ChatSession / Specialist execution - Improvement in logging of Specialists (still needs to be finished)
This commit is contained in:
@@ -0,0 +1,5 @@
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
class ListItem(BaseModel):
    """A generic titled item with a free-text description.

    Shared basic type reused by specialist outputs (e.g. competencies and
    knockout criteria) so lists of results have a uniform shape.
    """

    # Short name of the item.
    title: str = Field(..., description="The title or name of the item")
    # Longer explanation of what the item means.
    description: str = Field(..., description="A descriptive explanation of the item")
|
||||
@@ -0,0 +1,13 @@
|
||||
from typing import List, Optional
|
||||
from pydantic import BaseModel, Field
|
||||
from eveai_chat_workers.outputs.globals.basic_types.list_item import ListItem
|
||||
|
||||
class BehaviouralCompetencies(BaseModel):
    """Structured specialist output: behavioural competencies for a vacancy.

    Each competency is a generic `ListItem` (title + description); the
    earlier dedicated `BehaviouralCompetence` model was superseded by it.
    """

    competencies: List[ListItem] = Field(
        default_factory=list,
        description="A list of behavioural competencies."
    )
|
||||
@@ -0,0 +1,13 @@
|
||||
from typing import List, Optional
|
||||
from pydantic import BaseModel, Field
|
||||
from eveai_chat_workers.outputs.globals.basic_types.list_item import ListItem
|
||||
|
||||
class KnockoutCriteria(BaseModel):
    """Structured specialist output: knockout criteria for a vacancy.

    Each criterion is a generic `ListItem` (title + description); the earlier
    dedicated `KnockoutCriterion` model was superseded by it.
    """

    criteria: List[ListItem] = Field(
        default_factory=list,
        description="A prioritized list of the 5 most critical knockout criteria, ranked by importance."
    )
|
||||
@@ -3,6 +3,7 @@ from abc import ABC, abstractmethod
|
||||
from typing import Dict, Any, List
|
||||
from flask import current_app
|
||||
|
||||
from common.extensions import cache_manager
|
||||
from common.models.interaction import SpecialistRetriever
|
||||
from common.utils.execution_progress import ExecutionProgressTracker
|
||||
from config.logging_config import TuningLogger
|
||||
@@ -75,6 +76,8 @@ class BaseSpecialistExecutor(ABC):
|
||||
'tuning',
|
||||
tenant_id=self.tenant_id,
|
||||
specialist_id=self.specialist_id,
|
||||
session_id=self.session_id,
|
||||
log_file=f"logs/tuning_{self.session_id}.log"
|
||||
)
|
||||
# Verify logger is working with a test message
|
||||
if self.tuning:
|
||||
@@ -101,6 +104,13 @@ class BaseSpecialistExecutor(ABC):
|
||||
|
||||
def get_specialist_class(specialist_type: str, type_version: str):
    """Resolve and import the executor class for a specialist type/version.

    Module names encode only the major.minor part of the version, joined by
    underscores (e.g. ``"1.0.3"`` -> ``"1_0"``). When the specialist's cached
    config declares a ``partner``, the module lives in a partner-specific
    sub-package.

    :param specialist_type: Specialist type identifier (package name).
    :param type_version: Dotted version string, e.g. ``"1.0"``.
    :return: The ``SpecialistExecutor`` class of the resolved module.
    :raises ModuleNotFoundError: If no module exists for the resolved path.
    """
    major_minor = '_'.join(type_version.split('.')[:2])
    specialist_config = cache_manager.specialists_config_cache.get_config(specialist_type, type_version)
    partner = specialist_config.get("partner", None)
    current_app.logger.debug(f"Specialist partner for {specialist_type} {type_version} is {partner}")
    # Partner-specific specialists live in a sub-package named after the partner.
    if partner:
        module_path = f"eveai_chat_workers.specialists.{partner}.{specialist_type}.{major_minor}"
    else:
        module_path = f"eveai_chat_workers.specialists.{specialist_type}.{major_minor}"
    current_app.logger.debug(f"Importing specialist class from {module_path}")
    module = importlib.import_module(module_path)
    return module.SpecialistExecutor
|
||||
|
||||
@@ -77,6 +77,9 @@ class EveAICrewAICrew(Crew):
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
|
||||
    def __init__(self, specialist, name: str, **kwargs):
        # NOTE(review): log_file is computed but never used or forwarded —
        # presumably intended to be passed to Crew's logging configuration;
        # confirm and wire it up or remove.
        if specialist.tuning:
            log_file = f"logs/crewai/{specialist.session_id}_{specialist.task_id}.txt"

        super().__init__(**kwargs)
        # Back-reference to the owning specialist executor plus a display name
        # used in logging.
        self.specialist = specialist
        self.name = name
|
||||
|
||||
@@ -244,7 +244,7 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
|
||||
current_app.logger.error(f"Error detailing question: {e}")
|
||||
return question # Fallback to original question
|
||||
|
||||
def _retrieve_context(self, arguments: SpecialistArguments) -> Tuple[str, List[int]]:
|
||||
def _retrieve_context(self, arguments: SpecialistArguments) -> tuple[str, list[dict[str, Any]]]:
|
||||
with current_event.create_span("Specialist Retrieval"):
|
||||
self.log_tuning("Starting context retrieval", {
|
||||
"num_retrievers": len(self.retrievers),
|
||||
@@ -326,26 +326,29 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
|
||||
raise NotImplementedError
|
||||
|
||||
def execute_specialist(self, arguments: SpecialistArguments) -> SpecialistResult:
|
||||
# Detail the incoming query
|
||||
if self._cached_session.interactions:
|
||||
query = arguments.query
|
||||
language = arguments.language
|
||||
detailed_query = self._detail_question(language, query)
|
||||
current_app.logger.debug(f"Retrievers for this specialist: {self.retrievers}")
|
||||
if self.retrievers:
|
||||
# Detail the incoming query
|
||||
if self._cached_session.interactions:
|
||||
query = arguments.query
|
||||
language = arguments.language
|
||||
detailed_query = self._detail_question(language, query)
|
||||
else:
|
||||
detailed_query = arguments.query
|
||||
|
||||
modified_arguments = {
|
||||
"query": detailed_query,
|
||||
"original_query": arguments.query
|
||||
}
|
||||
detailed_arguments = arguments.model_copy(update=modified_arguments)
|
||||
formatted_context, citations = self._retrieve_context(detailed_arguments)
|
||||
result = self.execute(detailed_arguments, formatted_context, citations)
|
||||
modified_result = {
|
||||
"detailed_query": detailed_query,
|
||||
"citations": citations,
|
||||
}
|
||||
final_result = result.model_copy(update=modified_result)
|
||||
else:
|
||||
detailed_query = arguments.query
|
||||
|
||||
modified_arguments = {
|
||||
"query": detailed_query,
|
||||
"original_query": arguments.query
|
||||
}
|
||||
detailed_arguments = arguments.model_copy(update=modified_arguments)
|
||||
formatted_context, citations = self._retrieve_context(detailed_arguments)
|
||||
result = self.execute(detailed_arguments, formatted_context, citations)
|
||||
|
||||
modified_result = {
|
||||
"detailed_query": detailed_query,
|
||||
"citations": citations,
|
||||
}
|
||||
final_result = result.model_copy(update=modified_result)
|
||||
final_result = self.execute(arguments, "", [])
|
||||
|
||||
return final_result
|
||||
|
||||
@@ -10,7 +10,7 @@ from common.utils.business_event_context import current_event
|
||||
from eveai_chat_workers.retrievers.retriever_typing import RetrieverArguments
|
||||
from eveai_chat_workers.specialists.crewai_base_specialist import CrewAIBaseSpecialistExecutor
|
||||
from eveai_chat_workers.specialists.specialist_typing import SpecialistResult, SpecialistArguments
|
||||
from eveai_chat_workers.outputs.rag.rag_v1_0 import RAGOutput
|
||||
from eveai_chat_workers.outputs.globals.rag.rag_v1_0 import RAGOutput
|
||||
from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew, EveAICrewAIFlow, EveAIFlowState
|
||||
|
||||
|
||||
@@ -14,9 +14,9 @@ from common.utils.business_event_context import current_event
|
||||
from eveai_chat_workers.retrievers.retriever_typing import RetrieverArguments
|
||||
from eveai_chat_workers.specialists.crewai_base_specialist import CrewAIBaseSpecialistExecutor
|
||||
from eveai_chat_workers.specialists.specialist_typing import SpecialistResult, SpecialistArguments
|
||||
from eveai_chat_workers.outputs.identification.identification_v1_0 import LeadInfoOutput
|
||||
from eveai_chat_workers.outputs.spin.spin_v1_0 import SPINOutput
|
||||
from eveai_chat_workers.outputs.rag.rag_v1_0 import RAGOutput
|
||||
from eveai_chat_workers.outputs.globals.identification.identification_v1_0 import LeadInfoOutput
|
||||
from eveai_chat_workers.outputs.globals.spin.spin_v1_0 import SPINOutput
|
||||
from eveai_chat_workers.outputs.globals.rag.rag_v1_0 import RAGOutput
|
||||
from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew, EveAICrewAIFlow, EveAIFlowState
|
||||
from common.utils.pydantic_utils import flatten_pydantic_model
|
||||
|
||||
@@ -0,0 +1,158 @@
|
||||
import asyncio
|
||||
import json
|
||||
from os import wait
|
||||
from typing import Optional, List
|
||||
|
||||
from crewai.flow.flow import start, listen, and_
|
||||
from crewai import Process
|
||||
from flask import current_app
|
||||
from gevent import sleep
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from common.extensions import cache_manager
|
||||
from common.models.user import Tenant
|
||||
from common.utils.business_event_context import current_event
|
||||
from eveai_chat_workers.outputs.globals.basic_types.list_item import ListItem
|
||||
from eveai_chat_workers.retrievers.retriever_typing import RetrieverArguments
|
||||
from eveai_chat_workers.specialists.crewai_base_specialist import CrewAIBaseSpecialistExecutor
|
||||
from eveai_chat_workers.specialists.specialist_typing import SpecialistResult, SpecialistArguments
|
||||
from eveai_chat_workers.outputs.traicie.competencies.competencies_v1_0 import BehaviouralCompetencies
|
||||
from eveai_chat_workers.outputs.traicie.knockout_criteria.knockout_criteria_v1_0 import KnockoutCriteria
|
||||
from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew, EveAICrewAIFlow, EveAIFlowState
|
||||
from common.utils.pydantic_utils import flatten_pydantic_model
|
||||
|
||||
|
||||
class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
    """
    type: TRAICIE_VACANCY_DEFINITION_SPECIALIST
    type_version: 1.0
    Traicie Vacancy Definition Specialist Executor class

    Runs a single crew (one HR business-partner agent, two tasks) that derives
    behavioural competencies and knockout criteria from a vacancy text,
    orchestrated through a VacancyDefinitionFlow.
    """

    def __init__(self, tenant_id, specialist_id, session_id, task_id, **kwargs):
        # Created later in _instantiate_specialist(); declared here so the
        # attribute always exists.
        self.vac_def_crew = None

        super().__init__(tenant_id, specialist_id, session_id, task_id)

        # Load the Tenant & set language
        self.tenant = Tenant.query.get_or_404(tenant_id)

    @property
    def type(self) -> str:
        return "TRAICIE_VACANCY_DEFINITION_SPECIALIST"

    @property
    def type_version(self) -> str:
        return "1.0"

    def _config_task_agents(self):
        # Both tasks are handled by the same HR business-partner agent.
        self._add_task_agent("traicie_get_competencies_task", "traicie_hr_bp_agent")
        self._add_task_agent("traicie_get_ko_criteria_task", "traicie_hr_bp_agent")

    def _config_pydantic_outputs(self):
        # Structured (pydantic) outputs parsed from each task's raw result.
        self._add_pydantic_output("traicie_get_competencies_task", BehaviouralCompetencies, "competencies")
        self._add_pydantic_output("traicie_get_ko_criteria_task", KnockoutCriteria, "criteria")

    def _instantiate_specialist(self):
        # Tuning mode also turns on verbose crew logging.
        verbose = self.tuning

        vac_def_agents = [self.traicie_hr_bp_agent]
        vac_def_tasks = [self.traicie_get_competencies_task, self.traicie_get_ko_criteria_task]
        self.vac_def_crew = EveAICrewAICrew(
            self,
            "Vacancy Definition Crew",
            agents=vac_def_agents,
            tasks=vac_def_tasks,
            verbose=verbose,
        )

        self.flow = VacancyDefinitionFlow(
            self,
            self.vac_def_crew
        )

    def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
        """Kick off the vacancy-definition flow and map its state to a result.

        :param arguments: Must carry ``vacancy_text`` — the text to analyse.
        :param formatted_context: Unused by this specialist (no retrievers).
        :param citations: Unused by this specialist (no retrievers).
        :return: ``VacancyDefinitionSpecialistResult`` with competencies and
            criteria copied from the flow state when present.
        """
        self.log_tuning("Traicie Vacancy Definition Specialist execution started", {})

        flow_inputs = {
            "vacancy_text": arguments.vacancy_text,
        }

        # The kickoff return value duplicates what we read from the flow state
        # below, so it is deliberately not bound.
        self.flow.kickoff(inputs=flow_inputs)

        flow_state = self.flow.state

        results = VacancyDefinitionSpecialistResult.create_for_type(self.type, self.type_version)
        if flow_state.competencies:
            results.competencies = flow_state.competencies
        if flow_state.criteria:
            results.criteria = flow_state.criteria

        self.log_tuning("Traicie Vacancy Definition Specialist execution ended", {"Results": results.model_dump()})

        return results
|
||||
|
||||
|
||||
class VacancyDefinitionSpecialistInput(BaseModel):
    """Validated input payload for the vacancy-definition flow."""

    # Raw vacancy text to analyse. The alias mirrors the field name so
    # payloads may populate it by alias or by name.
    vacancy_text: Optional[str] = Field(None, alias="vacancy_text")
|
||||
|
||||
|
||||
class VacancyDefinitionSpecialistResult(SpecialistResult):
    """Specialist result extended with the vacancy-definition outputs."""

    # Behavioural competencies derived from the vacancy text, if produced.
    competencies: Optional[List[ListItem]] = None
    # Knockout criteria derived from the vacancy text, if produced.
    criteria: Optional[List[ListItem]] = None
|
||||
|
||||
|
||||
class VacancyDefFlowState(EveAIFlowState):
    """Flow state for Traicie Vacancy Definition specialist that automatically updates from task outputs"""

    # Validated inputs, set in VacancyDefinitionFlow.kickoff_async().
    input: Optional[VacancyDefinitionSpecialistInput] = None
    # Populated from the competencies task's pydantic output.
    competencies: Optional[List[ListItem]] = None
    # Populated from the knockout-criteria task's pydantic output.
    criteria: Optional[List[ListItem]] = None
|
||||
|
||||
|
||||
class VacancyDefinitionFlow(EveAICrewAIFlow[VacancyDefFlowState]):
    """Flow that runs the vacancy-definition crew and accumulates its outputs.

    The crew's kickoff result only carries the last task's output, so each
    task's structured output is copied into the flow state explicitly; the
    state (not the crew output) is what callers consume.
    """

    def __init__(self,
                 specialist_executor: CrewAIBaseSpecialistExecutor,
                 vac_def_crew: EveAICrewAICrew,
                 **kwargs):
        super().__init__(specialist_executor, "Traicie Vacancy Definition Specialist Flow", **kwargs)
        self.specialist_executor = specialist_executor
        self.vac_def_crew = vac_def_crew
        # Set when the crew kickoff raises, so callers can detect failure
        # even if the exception is handled upstream.
        self.exception_raised = False

    @start()
    def process_inputs(self):
        # No pre-processing needed; inputs are validated in kickoff_async().
        return ""

    @listen(process_inputs)
    async def execute_vac_def(self):
        """Run the crew and copy each task's pydantic output into the state."""
        inputs = self.state.input.model_dump()
        try:
            current_app.logger.debug("In execute_vac_def")
            crew_output = await self.vac_def_crew.kickoff_async(inputs=inputs)
            # Unfortunately, crew_output will only contain the output of the
            # latest task. As we will only take into account the flow state,
            # we need to ensure both competencies and criteria are copied to
            # the flow state.
            for task in self.vac_def_crew.tasks:
                current_app.logger.debug(f"Task {task.name} output:\n{task.output}")
                if task.name == "traicie_get_competencies_task":
                    self.state.competencies = task.output.pydantic.competencies
                elif task.name == "traicie_get_ko_criteria_task":
                    self.state.criteria = task.output.pydantic.criteria
            current_app.logger.debug(f"State after execute_vac_def: {self.state}")
            current_app.logger.debug(f"State dump after execute_vac_def: {self.state.model_dump()}")
            return crew_output
        except Exception as e:
            current_app.logger.error(f"CREW execute_vac_def Kickoff Error: {str(e)}")
            self.exception_raised = True
            # Bare raise preserves the original traceback.
            raise

    async def kickoff_async(self, inputs=None):
        """Validate inputs into the flow state, run the flow, return the state.

        :param inputs: Mapping validated against VacancyDefinitionSpecialistInput.
        :return: The accumulated ``VacancyDefFlowState`` (not the flow's own
            return value, which is intentionally discarded).
        """
        current_app.logger.debug(f"Async kickoff {self.name}")
        self.state.input = VacancyDefinitionSpecialistInput.model_validate(inputs)
        await super().kickoff_async(inputs)
        return self.state
|
||||
@@ -253,6 +253,7 @@ def execute_specialist(self, tenant_id: int, specialist_id: int, arguments: Dict
|
||||
current_app.logger.info(
|
||||
f'execute_specialist: Processing request for tenant {tenant_id} using specialist {specialist_id}')
|
||||
|
||||
new_interaction = None
|
||||
try:
|
||||
# Ensure we have a session
|
||||
cached_session = cache_manager.chat_session_cache.get_cached_session(
|
||||
@@ -331,6 +332,13 @@ def execute_specialist(self, tenant_id: int, specialist_id: int, arguments: Dict
|
||||
except Exception as e:
|
||||
ept.send_update(task_id, "EveAI Specialist Error", {'Error': str(e)})
|
||||
current_app.logger.error(f'execute_specialist: Error executing specialist: {e}')
|
||||
new_interaction.processing_error = str(e)[:255]
|
||||
try:
|
||||
db.session.add(new_interaction)
|
||||
db.session.commit()
|
||||
except SQLAlchemyError as e:
|
||||
current_app.logger.error(f'execute_specialist: Error updating interaction: {e}')
|
||||
|
||||
raise
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user