|
|
|
|
@@ -3,10 +3,12 @@ from typing import Dict, Any, Optional, Type, TypeVar, List, Tuple
|
|
|
|
|
|
|
|
|
|
from crewai.flow.flow import FlowState
|
|
|
|
|
from flask import current_app
|
|
|
|
|
from langchain_core.output_parsers import StrOutputParser
|
|
|
|
|
from langchain_core.prompts import ChatPromptTemplate
|
|
|
|
|
|
|
|
|
|
from common.models.interaction import Specialist
|
|
|
|
|
from common.utils.business_event_context import current_event
|
|
|
|
|
from common.utils.model_utils import get_model_variables, get_crewai_llm
|
|
|
|
|
from common.utils.model_utils import get_model_variables, get_crewai_llm, create_language_template
|
|
|
|
|
from eveai_chat_workers.retrievers.retriever_typing import RetrieverArguments
|
|
|
|
|
from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAIAgent, EveAICrewAITask
|
|
|
|
|
from crewai.tools import BaseTool
|
|
|
|
|
@@ -20,7 +22,7 @@ from common.utils.cache.crewai_configuration import (
|
|
|
|
|
ProcessedAgentConfig, ProcessedTaskConfig, ProcessedToolConfig,
|
|
|
|
|
SpecialistProcessedConfig
|
|
|
|
|
)
|
|
|
|
|
from eveai_chat_workers.specialists.specialist_typing import SpecialistArguments
|
|
|
|
|
from eveai_chat_workers.specialists.specialist_typing import SpecialistArguments, SpecialistResult
|
|
|
|
|
|
|
|
|
|
T = TypeVar('T') # For generic type hints
|
|
|
|
|
|
|
|
|
|
@@ -67,18 +69,19 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
|
|
|
|
|
# Format history for the prompt
|
|
|
|
|
self._formatted_history = "\n\n".join([
|
|
|
|
|
f"HUMAN:\n{interaction.specialist_results.get('detailed_query')}\n\n"
|
|
|
|
|
f"AI:\n{interaction.specialist_results.get('answer')}"
|
|
|
|
|
f"AI:\n{interaction.specialist_results.get('rag_output').get('answer')}"
|
|
|
|
|
for interaction in self._cached_session.interactions
|
|
|
|
|
])
|
|
|
|
|
|
|
|
|
|
@property
def formatted_history(self) -> str:
    """Conversation history rendered as alternating HUMAN/AI turns.

    Lazily builds the transcript from the cached session's interactions and
    memoizes it in ``self._formatted_history`` so repeated reads (debug
    logging, prompt construction) do not rebuild the string each time.

    Bug fixes vs the previous version:
    - the old body returned early, leaving the cached variant unreachable
      (diff-merge residue); now there is a single, cached implementation.
    - ``.get('rag_output')`` could return None and raise AttributeError on
      the chained ``.get('answer')``; now defaults to an empty dict.
    """
    if not self._formatted_history:
        self._formatted_history = "\n\n".join([
            f"HUMAN:\n{interaction.specialist_results.get('detailed_query')}\n\n"
            f"AI:\n{interaction.specialist_results.get('rag_output', {}).get('answer', '')}"
            for interaction in self._cached_session.interactions
        ])
    return self._formatted_history
|
|
|
|
|
|
|
|
|
|
def _add_task_agent(self, task_name: str, agent_name: str):
|
|
|
|
|
self._task_agents[task_name.lower()] = agent_name
|
|
|
|
|
@@ -201,15 +204,68 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
|
|
|
|
|
"""Instantiate a crew (or flow) to set up the complete specialist, using the assets (agents, tasks, tools).
|
|
|
|
|
The assets can be retrieved using their type name in lower case, e.g. rag_agent"""
|
|
|
|
|
|
|
|
|
|
def retrieve_context(self, arguments: SpecialistArguments) -> Tuple[str, List[int]]:
|
|
|
|
|
def _detail_question(self, language: str, question: str) -> str:
    """Rewrite *question* into a self-contained query using session history.

    Renders the cached 'history' prompt template in *language*, pipes it
    through the LLM, and returns the detailed question.

    Args:
        language: Language code/name used to localize the prompt template.
        question: The user's original question.

    Returns:
        The history-aware detailed question, or *question* unchanged on any
        failure (deliberate best-effort fallback).
    """
    try:
        with current_event.create_span("Specialist Detail Question"):
            # Get LLM and template
            llm = self.model_variables.get_llm(temperature=0.3)
            template = cache_manager.prompts_config_cache.get_config('history').get('content', '')
            current_app.logger.debug(f"History Template: {template}")
            language_template = create_language_template(template, language)
            # Bug fix: previously logged `template` again here instead of
            # the language-specific version.
            current_app.logger.debug(f"History Language Template: {language_template}")

            # Create prompt
            history_prompt = ChatPromptTemplate.from_template(language_template)

            # Create chain (LCEL: prompt -> llm -> plain string)
            chain = (
                history_prompt |
                llm |
                StrOutputParser()
            )

            # Hoist the property read: `formatted_history` is re-evaluated
            # on each access, and it was read three times below.
            history = self.formatted_history
            current_app.logger.debug(f"Formatted History: {history}")

            # Execute chain
            detailed_question = chain.invoke({
                "history": history,
                "question": question
            })

            self.log_tuning("_detail_question", {
                "cached_session_id": self._cached_session.session_id,
                "cached_session.interactions": str(self._cached_session.interactions),
                "original_question": question,
                "history_used": history,
                "detailed_question": detailed_question,
            })

            self.update_progress("Detail Question", {"name": self.type})

            return detailed_question

    except Exception as e:
        # Deliberate best-effort: a history-rewrite failure must not break
        # the request, so log and fall back to the original question.
        current_app.logger.error(f"Error detailing question: {e}")
        return question  # Fallback to original question
|
|
|
|
|
|
|
|
|
|
def _retrieve_context(self, arguments: SpecialistArguments) -> Tuple[str, List[int]]:
|
|
|
|
|
with current_event.create_span("Specialist Retrieval"):
|
|
|
|
|
self.log_tuning("Starting context retrieval", {
|
|
|
|
|
"num_retrievers": len(self.retrievers),
|
|
|
|
|
"all arguments": arguments.model_dump(),
|
|
|
|
|
})
|
|
|
|
|
current_app.logger.debug(f"Retrieving context from arguments: {arguments}")
|
|
|
|
|
|
|
|
|
|
original_query = arguments.query
|
|
|
|
|
detailed_query = self._detail_question(arguments.language, original_query)
|
|
|
|
|
|
|
|
|
|
modified_arguments = arguments.model_copy(update={
|
|
|
|
|
"query": detailed_query,
|
|
|
|
|
"original_query": original_query
|
|
|
|
|
})
|
|
|
|
|
|
|
|
|
|
# Get retriever-specific arguments
|
|
|
|
|
retriever_arguments = arguments.retriever_arguments
|
|
|
|
|
retriever_arguments = modified_arguments.retriever_arguments
|
|
|
|
|
|
|
|
|
|
# Collect context from all retrievers
|
|
|
|
|
all_context = []
|
|
|
|
|
@@ -233,6 +289,7 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
|
|
|
|
|
retriever_args = RetrieverArguments(**current_retriever_args)
|
|
|
|
|
|
|
|
|
|
# Each retriever gets its own specific arguments
|
|
|
|
|
current_app.logger.debug(f"Retrieving context {retriever_id} with arguments {retriever_args}")
|
|
|
|
|
retriever_result = retriever.retrieve(retriever_args)
|
|
|
|
|
all_context.extend(retriever_result)
|
|
|
|
|
|
|
|
|
|
@@ -266,3 +323,31 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
|
|
|
|
|
"Citations": citations})
|
|
|
|
|
|
|
|
|
|
return formatted_context, citations
|
|
|
|
|
|
|
|
|
|
@abstractmethod
def execute(self, arguments: SpecialistArguments, formatted_context: str, citations: List[int]) -> SpecialistResult:
    """Run the specialist on the already-detailed arguments.

    Implementations receive `formatted_context` and `citations` as produced
    by `_retrieve_context`, and must return a SpecialistResult. Called by
    `execute_specialist` after query detailing and retrieval.
    """
    pass
|
|
|
|
|
|
|
|
|
|
def execute_specialist(self, arguments: SpecialistArguments) -> SpecialistResult:
    """Full specialist pipeline: detail the query, retrieve context, execute.

    The detailed query replaces `query` in the arguments passed downstream
    (the original is preserved in `original_query`), and is also recorded in
    the returned result as `detailed_query`.
    """
    # Only rewrite the query when there is history to rewrite against.
    if self._cached_session.interactions:
        detailed_query = self._detail_question(arguments.language, arguments.query)
    else:
        detailed_query = arguments.query

    detailed_arguments = arguments.model_copy(update={
        "query": detailed_query,
        "original_query": arguments.query,
    })

    formatted_context, citations = self._retrieve_context(detailed_arguments)
    result = self.execute(detailed_arguments, formatted_context, citations)

    # Surface the detailed query in the result for history formatting.
    return result.model_copy(update={"detailed_query": detailed_query})
|
|
|
|
|
|