- Full implementation of Traicie Selection Specialist - VA version
- Improvements to CrewAI specialists and Specialists in general
- Addition of reusable components to check or get answers to questions from the full Human Message - HumanAnswerServices
common/services/utils/human_answer_services.py (new file, 108 lines added)
@@ -0,0 +1,108 @@
from flask import current_app, session
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

from common.utils.business_event import BusinessEvent
from common.utils.business_event_context import current_event
from common.utils.model_utils import get_template
from eveai_chat_workers.outputs.globals.a2q_output.q_a_output_v1_0 import A2QOutput
from eveai_chat_workers.outputs.globals.q_a_output.q_a_output_v1_0 import QAOutput


class HumanAnswerServices:
    @staticmethod
    def check_affirmative_answer(tenant_id: int, question: str, answer: str, language_iso: str) -> bool:
        return HumanAnswerServices._check_answer(tenant_id, question, answer, language_iso,
                                                 "check_affirmative_answer", "Check Affirmative Answer")

    @staticmethod
    def check_additional_information(tenant_id: int, question: str, answer: str, language_iso: str) -> bool:
        return HumanAnswerServices._check_answer(tenant_id, question, answer, language_iso,
                                                 "check_additional_information", "Check Additional Information")

    @staticmethod
    def get_answer_to_question(tenant_id: int, question: str, answer: str, language_iso: str) -> str:
        language = HumanAnswerServices._process_arguments(question, answer, language_iso)
        span_name = "Get Answer To Question"
        template_name = "get_answer_to_question"

        if not current_event:
            # No business event is active yet: open one so the span below can be recorded.
            with BusinessEvent('Answer Check Service', tenant_id):
                with current_event.create_span(span_name):
                    return HumanAnswerServices._get_answer_to_question_logic(question, answer, language, template_name)
        else:
            with current_event.create_span(span_name):
                return HumanAnswerServices._get_answer_to_question_logic(question, answer, language, template_name)

    @staticmethod
    def _check_answer(tenant_id: int, question: str, answer: str, language_iso: str, template_name: str,
                      span_name: str) -> bool:
        language = HumanAnswerServices._process_arguments(question, answer, language_iso)
        if not current_event:
            # No business event is active yet: open one so the span below can be recorded.
            with BusinessEvent('Answer Check Service', tenant_id):
                with current_event.create_span(span_name):
                    return HumanAnswerServices._check_answer_logic(question, answer, language, template_name)
        else:
            with current_event.create_span(span_name):
                return HumanAnswerServices._check_answer_logic(question, answer, language, template_name)

    @staticmethod
    def _check_answer_logic(question: str, answer: str, language: str, template_name: str) -> bool:
        prompt_params = {
            'question': question,
            'answer': answer,
            'language': language,
        }

        template, llm = get_template(template_name)
        check_answer_prompt = ChatPromptTemplate.from_template(template)
        setup = RunnablePassthrough()

        # Constrain the LLM to the QAOutput schema so the answer field can be read directly.
        output_schema = QAOutput
        structured_llm = llm.with_structured_output(output_schema)

        chain = setup | check_answer_prompt | structured_llm

        raw_answer = chain.invoke(prompt_params)
        current_app.logger.debug(f"Raw answer: {raw_answer}")

        return raw_answer.answer

    @staticmethod
    def _get_answer_to_question_logic(question: str, answer: str, language: str, template_name: str) -> str:
        prompt_params = {
            'question': question,
            'answer': answer,
            'language': language,
        }

        template, llm = get_template(template_name)
        check_answer_prompt = ChatPromptTemplate.from_template(template)
        setup = RunnablePassthrough()

        # Constrain the LLM to the A2QOutput schema so the extracted answer can be read directly.
        output_schema = A2QOutput
        structured_llm = llm.with_structured_output(output_schema)

        chain = setup | check_answer_prompt | structured_llm

        raw_answer = chain.invoke(prompt_params)
        current_app.logger.debug(f"Raw answer: {raw_answer}")

        return raw_answer.answer

    @staticmethod
    def _process_arguments(question: str, answer: str, language_iso: str) -> str:
        # Validate inputs and map the ISO 639-1 code through the configured language lookup.
        if language_iso.strip() == '':
            raise ValueError("Language cannot be empty")
        language = current_app.config.get('SUPPORTED_LANGUAGE_ISO639_1_LOOKUP').get(language_iso)
        if language is None:
            raise ValueError(f"Unsupported language: {language_iso}")
        if question.strip() == '':
            raise ValueError("Question cannot be empty")
        if answer.strip() == '':
            raise ValueError("Answer cannot be empty")

        return language
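For reference, a minimal usage sketch of the new service (not part of the commit): it assumes an active Flask application context, a valid tenant id, and that the caller already holds the original question and the full human message; the wrapper function names and the 'en' language code below are illustrative only.

# Hypothetical caller; names and values are illustrative.
from common.services.utils.human_answer_services import HumanAnswerServices

def wants_to_continue(tenant_id: int, last_question: str, human_message: str) -> bool:
    # True when the human message answers the question affirmatively.
    return HumanAnswerServices.check_affirmative_answer(
        tenant_id, last_question, human_message, 'en'
    )

def extract_answer(tenant_id: int, question: str, human_message: str) -> str:
    # Pull the answer to a specific question out of the full human message.
    return HumanAnswerServices.get_answer_to_question(
        tenant_id, question, human_message, 'en'
    )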