- Refinement of the chat client to give clearer visual cues for user vs. chatbot messages

- Introduction of an interview_phase alongside the normal phase in TRAICIE_SELECTION_SPECIALIST to make the interaction with the bot more human.
- More varied, randomised humanised messages in TRAICIE_SELECTION_SPECIALIST (see the sketch below).
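Illustrative only: a minimal, self-contained Python sketch of the two mechanics described above, a randomly chosen humanised message per turn plus a persisted phase that the bot routes on. The names and phases here are simplified and hypothetical; the actual logic lives in SpecialistExecutor and SelectionFlow in the diffs further down.

import random

START_SELECTION_QUESTIONS = [
    "Shall we see if this job could be a good fit for you?",
    "May I ask you a first question?",
]
KO_CRITERIA_NEXT_MESSAGES = [
    "Thank you for your answer. Here's a next question.",
    "Thanks for the input. Let's move on to the next question.",
]

def next_reply(previous_phase: str) -> tuple[str, str]:
    """Pick a randomised humanised message and the next phase (simplified)."""
    if previous_phase == "initial":
        # Randomise the opener so repeated sessions do not feel scripted.
        return random.choice(START_SELECTION_QUESTIONS), "rag"
    if previous_phase == "ko_question_evaluation":
        # Vary the transition between knock-out questions.
        return random.choice(KO_CRITERIA_NEXT_MESSAGES), "ko_question_evaluation"
    return "Thanks for your reply.", previous_phase

answer, phase = next_reply("initial")  # e.g. ("May I ask you a first question?", "rag")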
This commit is contained in:
Josako
2025-08-02 16:36:41 +02:00
parent 998ddf4c03
commit 9a88582fff
50 changed files with 2064 additions and 384 deletions

View File

@@ -77,7 +77,7 @@ class EveAICrewAICrew(Crew):
def __init__(self, specialist, name: str, **kwargs):
if specialist.tuning:
log_file = f"logs/crewai/{specialist.session_id}_{specialist.task_id}.txt"
kwargs['output_log_file'] = f"/app/logs/crew_{name}.txt"
super().__init__(**kwargs)
self.specialist = specialist
@@ -106,7 +106,7 @@ class EveAICrewAICrew(Crew):
class EveAICrewAIFlow(Flow):
specialist: Any = Field(default=None, exclude=True)
name: str = Field(default=None, exclude=True)
# name: str = Field(default=None, exclude=True)
model_config = ConfigDict(arbitrary_types_allowed=True)
def __init__(self, specialist, name: str, **kwargs):

View File

@@ -67,6 +67,10 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
# Format history for the prompt
self._formatted_history = self._generate_formatted_history()
self.arguments = None
self.formatted_context = None
self.citations = None
@property
def formatted_history(self) -> str:
if not self._formatted_history:
@@ -75,18 +79,20 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
def _generate_formatted_history(self) -> str:
"""Generate the formatted history string from cached session interactions."""
current_app.logger.debug(f"Generating formatted history for {self.session_id}")
current_app.logger.debug(f"Cached session interactions: {len(self._cached_session.interactions)}")
return "\n\n".join([
"\n\n".join([
f"HUMAN:\n"
f"{interaction.specialist_arguments['question']}"
if interaction.specialist_arguments.get('question') else "",
if interaction.specialist_arguments.get('question') and interaction.specialist_arguments.get('question') != "Initialize" else "",
f"{interaction.specialist_arguments.get('form_values')}"
if interaction.specialist_arguments.get('form_values') else "",
f"AI:\n{interaction.specialist_results['answer']}"
if interaction.specialist_results.get('answer') else ""
]).strip()
for interaction in self._cached_session.interactions
if interaction.specialist_arguments.get('question') != "Initialize"
])
def _add_task_agent(self, task_name: str, agent_name: str):
@@ -347,6 +353,7 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
raise NotImplementedError
def execute_specialist(self, arguments: SpecialistArguments) -> SpecialistResult:
self.log_tuning("*****************************************************************************************", {})
if self.retrievers:
formatted_context = None
citations = None

View File

@@ -19,7 +19,7 @@ from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew,
from common.services.interaction.specialist_services import SpecialistServices
NEW_SPECIALIST_TYPE = "TRAICIE_SELECTION_SPECIALIST"
NEW_SPECIALIST_TYPE_VERSION = "1.4"
NEW_SPECIALIST_TYPE_VERSION = "1.5"
class SpecialistExecutor(CrewAIBaseSpecialistExecutor):

View File

@@ -24,16 +24,56 @@ from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew,
from eveai_chat_workers.specialists.crewai_base_specialist import CrewAIBaseSpecialistExecutor
from eveai_chat_workers.specialists.specialist_typing import SpecialistResult, SpecialistArguments
INITIALISATION_MESSAGE = "Thank you for showing your interest! Let's start the selection process by asking you a few important questions."
START_SELECTION_QUESTION = "Do you want to start the selection procedure?"
INITIALISATION_MESSAGES = [
"Great! Let's see if this job might be a match for you by going through a few questions.",
"Nice to hear that! Ill start with a first question to kick things off.",
"Good to know! Lets begin with the first question.",
"Thanks for your reply. Let's get started with a few short questions.",
"Excellent! Here's a first question to explore your fit with the role.",
"Glad to hear that. Let's start with the first question.",
"Appreciate your response! Ill now ask you the first question.",
"Awesome! Let's begin with a few questions to learn more about you.",
"Perfect, thank you. Let's start the matching process with a first question.",
"Thanks for sharing that. Ready for the first question?"
]
START_SELECTION_QUESTIONS = [
"Shall we see if this job could be a good fit for you?",
"Shall we go through a few questions to explore if there's a potential match?",
"May I ask you a first question?",
"Are you interested in applying for this position?",
"Would you like to take the next step and answer a few short questions?",
"Shall we begin the application process together?",
"Would you like to start the matching process to see if this role suits your preferences?",
"Lets explore if this opportunity aligns with what you're looking for — ready for a few questions?",
"Would you be open to answering a few questions to learn more about the role and your fit?",
"Would you like to continue and start the first part of the application journey?"
]
INSUFFICIENT_INFORMATION_MESSAGE = (
"We do not have the necessary information to provide you with the requested answers. "
"Please accept our apologies. You can ask other questions or proceed with the "
"selection process.")
KO_CRITERIA_NOT_MET_MESSAGE = ("Thank you for answering our questions! We processed your answers. Unfortunately, you do "
"not comply with the minimum requirements for this job. Therefore, we stop this "
"selection procedure.")
KO_CRITERIA_MET_MESSAGE = "We processed your answers with a positive result."
KO_CRITERIA_NOT_MET_MESSAGES = [
"Thank you for your answers. Based on your responses, we won't be moving forward with this particular role. We do encourage you to keep an eye on our website for future opportunities.",
"We appreciate the time you took to answer our questions. At this point, we wont be proceeding with your application, but feel free to check our website regularly for new vacancies.",
"Thanks for your input. While were not continuing with your application for this role, wed be happy to welcome your interest again in the future — new opportunities are posted regularly on our site.",
"Thank you for participating. Although this role doesnt seem to be the right match right now, we invite you to stay connected and check back for other opportunities.",
"We truly appreciate your time and effort. Unfortunately, we wont be progressing with this application, but we encourage you to visit our website again for future job openings.",
"Thanks so much for answering our questions. This role may not be the right fit, but wed love for you to consider applying again when new positions become available.",
"We value your interest in this position. While we wont be moving forward in this case, we warmly invite you to explore other roles with us in the future.",
"Your input has been very helpful. Although we're not proceeding at this time, we thank you for your interest and hope to see you again for other opportunities.",
"Thank you for taking part in the process. We wont continue with your application for this role, but we invite you to stay informed about future openings through our website."
]
KO_CRITERIA_MET_MESSAGES = [
"Thank you for your answers. They correspond to some key elements of the role. Would you be open to sharing your contact details so we can continue the selection process?",
"We appreciate your input. Based on your answers, we'd like to continue the conversation. Could you share your contact information with us?",
"Thanks for your replies. To proceed with the application process, may we ask you to provide your contact details?",
"Your answers help us better understand your background. If you're open to it, can share your contact info so we can follow up?",
"Thank you for taking the time to answer these questions. If you'd like to continue, could we have your contact information?",
"Your responses give us a good first impression. In order to move forward with the process, could you share your contact details?",
"Weve reviewed your answers with interest. To take the next step, would you be willing to share your contact information?",
"Your input has been recorded. If youre comfortable doing so, will you please leave your contact information so we can reach out for the next steps?",
"Wed like to keep in touch regarding the next phases of the selection. Could you provide your contact details for further communication?"
]
KO_CRITERIA_NEXT_MESSAGES = [
"Thank you for your answer. Here's a next question.",
"Your answer fits our needs. We have yet another question to ask you.",
@@ -42,23 +82,32 @@ KO_CRITERIA_NEXT_MESSAGES = [
"Appreciate your reply! Here's the next one.",
"Thanks for the input. Lets move on to the next question.",
"Thats exactly what we needed to hear. Here comes the next question.",
"Looks promising! Lets continue with another quick check.",
"Thanks! Here's another point we'd like to clarify."
"Looks promising! Lets continue with another quick check."
]
RQC_MESSAGE = "You are well suited for this job."
CONTACT_DATA_QUESTION = ("Are you willing to provide us with your contact data, so we can contact you to continue "
"the selection process?")
CONTACT_DATA_GUIDING_MESSAGE = ("Thank you for trusting your contact data with us. Below you will find a form to help you "
"provide us with the necessary information.")
NO_CONTACT_DATA_QUESTION = ("We are sorry to hear that. The only way to proceed with the selection process is "
"to provide us with your contact data. Do you want to provide us with your contact data? "
"If not, we thank you, and we'll end the selection process.")
NO_CONTACT_DATA_QUESTIONS = [
"That's a pity! In order to continue, we do need your contact details. Would you be willing to share them? ",
"We understand your hesitation. However, to proceed with the process, your contact information is required. Would you like to share it with us?",
"Unfortunately, we can only move forward if you provide your contact details. Would you still consider sharing them with us?",
"Its totally your choice, of course. But without your contact details, we cant proceed further. Would you be open to sharing them?",
"Wed love to keep going, but we can only do so if we have your contact details. Would you like to provide them now?",
"Your privacy matters, and we respect your decision. Just know that without your contact details, well need to end the process here. Still interested in moving forward?",
"Its a shame to stop here, but we do need your contact info to proceed. Would you like to share it so we can continue?"
]
CONTACT_DATA_PROCESSED_MESSAGE = "Thank you for allowing us to contact you."
CONTACT_TIME_QUESTION = "When do you prefer us to contact you? You can select some options in the provided form"
NO_CONTACT_TIME_MESSAGE = ("We could not process your preferred contact time. Can you please provide us with your "
"preferred contact time?")
CONTACT_TIME_PROCESSED_MESSAGE = ("We successfully processed your preferred contact time. We will contact you as soon "
"as possible.")
CONTACT_TIME_PROCESSED_MESSAGES = [
"Thank you! We've received all the information we need to continue with the selection process. We'll get in touch with you as soon as possible. If you have any questions in the meantime, don't hesitate to ask.",
"Great, we have everything we need to proceed. We'll be in touch shortly. Don't hesitate to ask if anything comes up in the meantime.",
"Thanks for providing your details. We now have all the necessary information and will contact you soon. If you have any further questions, we're here to help.",
"Perfect, your information has been received. We'll move forward and get back to you as soon as we can. Feel free to reach out if you have any questions.",
"All set! Weve received everything needed to move forward. We'll contact you soon. In the meantime, feel free to ask us anything.",
"Thanks again! We've got everything we need to proceed. Expect to hear from us shortly. If anything is unclear, you're welcome to ask further questions.",
"Excellent, we now have all the information required to take the next steps. Well be in touch as soon as possible. If you have any questions, just let us know.",
"We appreciate your input. With all the needed details in place, well reach out shortly to continue the process. Questions are always welcome in the meantime.",
"Thank you for completing this step. We have all the information we need and will contact you as soon as we can. If you have questions, we're happy to assist."
]
NO_FURTHER_QUESTIONS_MESSAGE = "We will not process any further questions."
SUCCESSFUL_ENDING_MESSAGE = "Thank you for your application. We will contact you as soon as possible!"
@@ -78,6 +127,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
# Load the Tenant & set language
self.tenant = Tenant.query.get_or_404(tenant_id)
self.specialist_phase = "initial"
self.previous_ai_question = None
@property
def type(self) -> str:
@@ -94,6 +144,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
self._add_pydantic_output("rag_task", RAGOutput, "rag_output")
def _config_state_result_relations(self):
self._add_state_result_relation("ai_question")
self._add_state_result_relation("rag_output")
self._add_state_result_relation("ko_criteria_scores")
self._add_state_result_relation("current_ko_criterium")
@@ -103,7 +154,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
def _instantiate_specialist(self):
verbose = self.tuning
rag_agents = [self.rag_agent]
rag_tasks = [self.rag_task]
self.rag_crew = EveAICrewAICrew(
@@ -113,7 +163,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
tasks=rag_tasks,
verbose=verbose,
)
self.flow = SelectionFlow(
self,
self.rag_crew,
@@ -126,7 +175,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
self.specialist_phase = "initial"
else:
self.specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')
self.previous_ai_question = self._cached_session.interactions[-1].specialist_results.get('ai_question', '')
current_app.logger.debug(f"Current Specialist Phase: {self.specialist_phase}")
results = None
match self.specialist_phase:
case "initial":
@@ -166,10 +217,12 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
return self.execute_start_selection_procedure_state(arguments, formatted_context, citations,
welcome_message)
# We are in orientation mode, so we give a standard message, and move to rag state
start_selection_question = TranslationServices.translate(self.tenant_id, START_SELECTION_QUESTION,
start_selection_question = TranslationServices.translate(self.tenant_id,
random.choice(START_SELECTION_QUESTIONS),
arguments.language)
self.flow.state.answer = f"{welcome_message}"
self.flow.state.phase = "rag"
self.flow.state.ai_question = welcome_message
results = SelectionResult.create_for_type(self.type, self.type_version)
@@ -178,7 +231,8 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
def execute_start_selection_procedure_state(self, arguments: SpecialistArguments, formatted_context, citations,
start_message=None) -> SpecialistResult:
initialisation_message = TranslationServices.translate(self.tenant_id, INITIALISATION_MESSAGE,
initialisation_message = TranslationServices.translate(self.tenant_id,
random.choice(INITIALISATION_MESSAGES),
arguments.language)
if start_message:
answer = f"{start_message}\n\n{initialisation_message}"
@@ -254,14 +308,15 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
self.flow.state.ko_criteria_scores.append(score)
if evaluation == "negative":
answer = TranslationServices.translate(self.tenant_id, KO_CRITERIA_NOT_MET_MESSAGE, arguments.language)
answer = TranslationServices.translate(self.tenant_id,
random.choice(KO_CRITERIA_NOT_MET_MESSAGES),
arguments.language)
self.flow.state.answer = answer
self.flow.state.phase = "no_valid_candidate"
results = SelectionResult.create_for_type(self.type, self.type_version)
else:
rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
next_idx = previous_idx + 1
if next_idx < len(ko_questions.ko_questions): # There's still a KO criterium to be evaluated
@@ -269,8 +324,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
ko_form = self._prepare_ko_question_form(ko_questions, next_ko_criterium.title, arguments.language)
next_message = random.choice(KO_CRITERIA_NEXT_MESSAGES)
answer = TranslationServices.translate(self.tenant_id, next_message, arguments.language)
if rag_output:
answer = f"{rag_output.answer}\n\n{answer}"
self.flow.state.answer = answer
self.flow.state.form_request = ko_form
@@ -278,13 +331,10 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
self.flow.state.current_ko_criterium_idx = next_idx
self.flow.state.phase = "ko_question_evaluation"
else: # All KO Criteria have been met
answer = TranslationServices.translate(self.tenant_id, KO_CRITERIA_MET_MESSAGE, arguments.language)
rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
if rag_output:
answer = f"{answer}\n\n{rag_output.answer}"
answer = (f"{answer}\n\n"
f"{TranslationServices.translate(self.tenant_id, RQC_MESSAGE, arguments.language)} \n\n"
f"{TranslationServices.translate(self.tenant_id, CONTACT_DATA_QUESTION, arguments.language)}")
answer = TranslationServices.translate(self.tenant_id,
random.choice(KO_CRITERIA_MET_MESSAGES),
arguments.language)
self.flow.state.ai_question = answer
self.flow.state.answer = answer
self.flow.state.current_ko_criterium = ""
@@ -299,28 +349,25 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
-> SpecialistResult:
self.log_tuning("Traicie Selection Specialist personal_contact_data_preparation started", {})
if HumanAnswerServices.check_affirmative_answer(self.tenant_id, CONTACT_DATA_QUESTION,
if HumanAnswerServices.check_affirmative_answer(self.tenant_id, self.previous_ai_question,
arguments.question, arguments.language):
contact_form = cache_manager.specialist_forms_config_cache.get_config("PERSONAL_CONTACT_FORM", "1.0")
contact_form = cache_manager.specialist_forms_config_cache.get_config("MINIMAL_PERSONAL_CONTACT_FORM", "1.0")
contact_form = TranslationServices.translate_config(self.tenant_id, contact_form, "fields",
arguments.language)
guiding_message = TranslationServices.translate(self.tenant_id, CONTACT_DATA_GUIDING_MESSAGE,
answer = TranslationServices.translate(self.tenant_id, CONTACT_DATA_GUIDING_MESSAGE,
arguments.language)
rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
if rag_output:
answer = f"{rag_output.answer}\n\n{guiding_message}"
else:
answer = guiding_message
self.flow.state.answer = answer
self.flow.state.form_request = contact_form
self.flow.state.phase = "personal_contact_data_processing"
results = SelectionResult.create_for_type(self.type, self.type_version,)
else:
answer = TranslationServices.translate(self.tenant_id, NO_CONTACT_DATA_QUESTION, arguments.language)
answer = TranslationServices.translate(self.tenant_id,
random.choice(NO_CONTACT_DATA_QUESTIONS),
arguments.language)
self.flow.state.answer = answer
self.flow.state.ai_question = answer
self.flow.state.phase = "personal_contact_data_preparation"
results = SelectionResult.create_for_type(self.type, self.type_version,)
@@ -330,18 +377,16 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
def execute_personal_contact_data_processing(self, arguments: SpecialistArguments, formatted_context, citations) \
-> SpecialistResult:
self.log_tuning("Traicie Selection Specialist personal_contact_data_processing started", {})
contact_time_question = TranslationServices.translate(self.tenant_id, CONTACT_TIME_QUESTION, arguments.language)
answer = (
f"{TranslationServices.translate(self.tenant_id, CONTACT_DATA_PROCESSED_MESSAGE, arguments.language)}\n"
f"{TranslationServices.translate(self.tenant_id, CONTACT_TIME_QUESTION, arguments.language)}")
f"{TranslationServices.translate(self.tenant_id, CONTACT_DATA_PROCESSED_MESSAGE, arguments.language)} "
f"{contact_time_question}")
time_pref_form = cache_manager.specialist_forms_config_cache.get_config("CONTACT_TIME_PREFERENCES_SIMPLE", "1.0")
time_pref_form = TranslationServices.translate_config(self.tenant_id, time_pref_form, "fields",
arguments.language)
rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
if rag_output:
answer = f"{answer}\n\n{rag_output.answer}"
self.flow.state.answer = answer
self.flow.state.ai_question = contact_time_question
self.flow.state.phase = "contact_time_evaluation"
self.flow.state.personal_contact_data = arguments.form_values
self.flow.state.form_request = time_pref_form
@@ -361,11 +406,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
self.log_tuning("Traicie Selection Specialist contact_time_evaluation started", {})
rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
message = TranslationServices.translate(self.tenant_id, CONTACT_TIME_PROCESSED_MESSAGE, arguments.language)
answer = TranslationServices.translate(self.tenant_id, CONTACT_TIME_PROCESSED_MESSAGE, arguments.language)
if rag_output:
answer = f"{rag_output.answer}\n\n{message}"
answer = TranslationServices.translate(self.tenant_id,
random.choice(CONTACT_TIME_PROCESSED_MESSAGES),
arguments.language)
self.flow.state.answer = answer
self.flow.state.phase = "candidate_selected"
@@ -387,8 +430,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
def execute_no_valid_candidate_state(self, arguments: SpecialistArguments, formatted_context, citations) \
-> SpecialistResult:
self.log_tuning("Traicie Selection Specialist no_valid_candidate started", {})
answer = (f"{TranslationServices.translate(self.tenant_id, KO_CRITERIA_NOT_MET_MESSAGE, arguments.language)}\n"
f"{TranslationServices.translate(self.tenant_id, NO_FURTHER_QUESTIONS_MESSAGE, arguments.language)}\n")
answer = TranslationServices.translate(self.tenant_id,
random.choice(KO_CRITERIA_NOT_MET_MESSAGES),
arguments.language)
self.flow.state.answer = answer
self.flow.state.phase = "no_valid_candidate"
@@ -411,13 +455,14 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
-> SpecialistResult:
self.log_tuning("Traicie Selection Specialist rag_state started", {})
start_selection_question = TranslationServices.translate(self.tenant_id, START_SELECTION_QUESTION,
start_selection_question = TranslationServices.translate(self.tenant_id,
random.choice(START_SELECTION_QUESTIONS),
arguments.language)
rag_output = None
if HumanAnswerServices.check_additional_information(self.tenant_id,
START_SELECTION_QUESTION,
random.choice(START_SELECTION_QUESTIONS),
arguments.question,
arguments.language):
rag_output = self.execute_rag(arguments, formatted_context, citations)
@@ -427,7 +472,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
answer = ""
if HumanAnswerServices.check_affirmative_answer(self.tenant_id,
START_SELECTION_QUESTION,
random.choice(START_SELECTION_QUESTIONS),
arguments.question,
arguments.language):
return self.execute_start_selection_procedure_state(arguments, formatted_context, citations, answer)
@@ -468,9 +513,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
return rag_output
def _check_and_execute_rag(self, arguments: SpecialistArguments, formatted_context, citations) -> RAGOutput:
def _check_and_execute_rag(self, arguments: SpecialistArguments, formatted_context, citations) -> RAGOutput | None:
if HumanAnswerServices.check_additional_information(self.tenant_id,
START_SELECTION_QUESTION,
self.previous_ai_question,
arguments.question,
arguments.language):
rag_output = self.execute_rag(arguments, formatted_context, citations)
@@ -610,6 +655,7 @@ class SelectionInput(BaseModel):
class SelectionFlowState(EveAIFlowState):
"""Flow state for RAG specialist that automatically updates from task outputs"""
input: Optional[SelectionInput] = None
ai_question: Optional[str] = None
rag_output: Optional[RAGOutput] = None
current_ko_criterium: Optional[str] = None
current_ko_criterium_idx: Optional[int] = None
@@ -620,6 +666,7 @@ class SelectionFlowState(EveAIFlowState):
class SelectionResult(SpecialistResult):
ai_question: Optional[str] = None
rag_output: Optional[RAGOutput] = Field(None, alias="rag_output")
ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores")
personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data")
@@ -643,6 +690,9 @@ class SelectionFlow(EveAICrewAIFlow[SelectionFlowState]):
@listen(process_inputs)
async def execute_rag(self):
inputs = self.state.input.model_dump()
current_app.logger.debug(f"execute_rag inputs: ---------------------------------------------------------------"
f" {inputs}")
try:
crew_output = await self.rag_crew.kickoff_async(inputs=inputs)
self.specialist_executor.log_tuning("RAG Crew Output", crew_output.model_dump())
@@ -658,6 +708,7 @@ class SelectionFlow(EveAICrewAIFlow[SelectionFlowState]):
raise e
async def kickoff_async(self, inputs=None):
current_app.logger.debug(f"kickoff SelectionFlow: ---------------------------------------------------------------")
self.state.input = SelectionInput.model_validate(inputs)
result = await super().kickoff_async(inputs)
return self.state

View File

@@ -0,0 +1,793 @@
import json
import random
from datetime import date
from typing import Optional, List, Dict, Any
from crewai.flow.flow import start, listen, router
from flask import current_app
from pydantic import BaseModel, Field, EmailStr
from common.extensions import cache_manager, db, minio_client
from common.models.interaction import EveAIAsset
from common.models.user import Tenant
from common.services.interaction.capsule_services import CapsuleServices
from common.services.utils.human_answer_services import HumanAnswerServices
from common.services.utils.translation_services import TranslationServices
from common.utils.business_event_context import current_event
from common.utils.eveai_exceptions import EveAISpecialistExecutionError
from eveai_chat_workers.definitions.language_level.language_level_v1_0 import LANGUAGE_LEVEL, get_language_level_context
from eveai_chat_workers.definitions.tone_of_voice.tone_of_voice_v1_0 import TONE_OF_VOICE, get_tone_of_voice_context
from eveai_chat_workers.outputs.globals.basic_types.list_item import ListItem
from eveai_chat_workers.outputs.globals.rag.rag_v1_0 import RAGOutput
from eveai_chat_workers.outputs.traicie.affirmative_answer.affirmative_answer_v1_0 import TraicieAffirmativeAnswerOutput
from eveai_chat_workers.outputs.traicie.interview_mode.interview_mode_v1_0 import TraicieInterviewModeOutput
from eveai_chat_workers.outputs.traicie.knockout_questions.knockout_questions_v1_0 import KOQuestion, KOQuestions
from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew, EveAICrewAIFlow, EveAIFlowState
from eveai_chat_workers.specialists.crewai_base_specialist import CrewAIBaseSpecialistExecutor
from eveai_chat_workers.specialists.specialist_typing import SpecialistResult, SpecialistArguments
INITIALISATION_MESSAGES = [
"Great! Let's see if this job might be a match for you by going through a few questions.",
"Nice to hear that! Ill start with a first question to kick things off.",
"Good to know! Lets begin with the first question.",
"Thanks for your reply. Let's get started with a few short questions.",
"Excellent! Here's a first question to explore your fit with the role.",
"Glad to hear that. Let's start with the first question.",
"Appreciate your response! Ill now ask you the first question.",
"Awesome! Let's begin with a few questions to learn more about you.",
"Perfect, thank you. Let's start the matching process with a first question.",
"Thanks for sharing that. Ready for the first question?"
]
START_SELECTION_QUESTIONS = [
"Shall we see if this job could be a good fit for you?",
"Shall we go through a few questions to explore if there's a potential match?",
"May I ask you a first question?",
"Are you interested in applying for this position?",
"Would you like to take the next step and answer a few short questions?",
"Shall we begin the application process together?",
"Would you like to start the matching process to see if this role suits your preferences?",
"Lets explore if this opportunity aligns with what you're looking for — ready for a few questions?",
"Would you be open to answering a few questions to learn more about the role and your fit?",
"Would you like to continue and start the first part of the application journey?"
]
TRY_TO_START_SELECTION_QUESTIONS = [
"That's a pity — we can only move forward if we start the selection process. Would you like to begin now?",
"We understand, though its worth mentioning that the only way to continue is to start the procedure. Shall we get started after all?",
"Too bad! To proceed, we do need to go through the selection steps. Would you be open to starting now?",
"Were sorry to hear that. The next steps are only possible if we start the selection process. Would you reconsider and allow us to begin?",
"That's unfortunate — continuing isnt possible without starting the process. Are you sure you dont want to begin now?",
"Thanks for your response. Just so you know: we can only continue if we go through the initial questions. Shall we start anyway?",
"We respect your answer, of course. Still, wed love to continue — but thats only possible if we begin the selection process. Can we do that now?",
"We get it — but to move forward, the selection process does need to be started. Would you like to give it a go?",
"Understood! However, we can't proceed without initiating the process. Would you like to start it now after all?",
"We appreciate your honesty. Just to clarify: the process only continues if we begin the selection. Shall we go ahead?"
]
INSUFFICIENT_INFORMATION_MESSAGES = [
"I'm afraid I don't have enough information to answer that properly. Feel free to ask something else!",
"There isnt enough data available right now to give you a clear answer. You're welcome to rephrase or ask a different question.",
"Sorry, I can't provide a complete answer based on the current information. Would you like to try asking something else?",
"I dont have enough details to give you a confident answer. You can always ask another question if youd like.",
"Unfortunately, I cant answer that accurately with the information at hand. Please feel free to ask something else.",
"Thats a great question, but I currently lack the necessary information to respond properly. Want to ask something different?",
"I wish I could help more, but the data I have isn't sufficient to answer this. Youre welcome to explore other questions.",
"Theres not enough context for me to provide a good answer. Dont hesitate to ask another question if you'd like!",
"I'm not able to give a definitive answer to that. Perhaps try a different question or angle?",
"Thanks for your question. At the moment, I cant give a solid answer — but I'm here if you want to ask something else!"
]
KO_CRITERIA_NOT_MET_MESSAGES = [
"Thank you for your answers. Based on your responses, we won't be moving forward with this particular role. We do encourage you to keep an eye on our website for future opportunities.",
"We appreciate the time you took to answer our questions. At this point, we wont be proceeding with your application, but feel free to check our website regularly for new vacancies.",
"Thanks for your input. While were not continuing with your application for this role, wed be happy to welcome your interest again in the future — new opportunities are posted regularly on our site.",
"Thank you for participating. Although this role doesnt seem to be the right match right now, we invite you to stay connected and check back for other opportunities.",
"We truly appreciate your time and effort. Unfortunately, we wont be progressing with this application, but we encourage you to visit our website again for future job openings.",
"Thanks so much for answering our questions. This role may not be the right fit, but wed love for you to consider applying again when new positions become available.",
"We value your interest in this position. While we wont be moving forward in this case, we warmly invite you to explore other roles with us in the future.",
"Your input has been very helpful. Although we're not proceeding at this time, we thank you for your interest and hope to see you again for other opportunities.",
"Thank you for taking part in the process. We wont continue with your application for this role, but we invite you to stay informed about future openings through our website."
]
KO_CRITERIA_MET_MESSAGES = [
"Thank you for your answers. They correspond to some key elements of the role. Would you be open to sharing your contact details so we can continue the selection process?",
"We appreciate your input. Based on your answers, we'd like to continue the conversation. Could you share your contact information with us?",
"Thanks for your replies. To proceed with the application process, may we ask you to provide your contact details?",
"Your answers help us better understand your background. If you're open to it, can share your contact info so we can follow up?",
"Thank you for taking the time to answer these questions. If you'd like to continue, could we have your contact information?",
"Your responses give us a good first impression. In order to move forward with the process, could you share your contact details?",
"Weve reviewed your answers with interest. To take the next step, would you be willing to share your contact information?",
"Your input has been recorded. If youre comfortable doing so, will you please leave your contact information so we can reach out for the next steps?",
"Wed like to keep in touch regarding the next phases of the selection. Could you provide your contact details for further communication?"
]
KO_CRITERIA_NEXT_MESSAGES = [
"Thank you for your answer. Here's a next question.",
"Your answer fits our needs. We have yet another question to ask you.",
"Positive this far! Here's a follow-up question.",
"Great, thats just what we were hoping for. Lets continue with another question.",
"Appreciate your reply! Here's the next one.",
"Thanks for the input. Lets move on to the next question.",
"Thats exactly what we needed to hear. Here comes the next question.",
"Looks promising! Lets continue with another quick check."
]
CONTACT_DATA_GUIDING_MESSAGE = ("Thank you for trusting your contact data with us. Below you will find a form to help you "
"provide us with the necessary information.")
NO_CONTACT_DATA_QUESTIONS = [
"That's a pity! In order to continue, we do need your contact details. Would you be willing to share them? ",
"We understand your hesitation. However, to proceed with the process, your contact information is required. Would you like to share it with us?",
"Unfortunately, we can only move forward if you provide your contact details. Would you still consider sharing them with us?",
"Its totally your choice, of course. But without your contact details, we cant proceed further. Would you be open to sharing them?",
"Wed love to keep going, but we can only do so if we have your contact details. Would you like to provide them now?",
"Your privacy matters, and we respect your decision. Just know that without your contact details, well need to end the process here. Still interested in moving forward?",
"Its a shame to stop here, but we do need your contact info to proceed. Would you like to share it so we can continue?"
]
CONTACT_DATA_QUESTIONS = [
"Could you please share your contact details so we can reach out to you for the next steps in the selection process?",
"Would you be willing to provide your contact information so we can continue with your application?",
"Can you share a way for us to contact you as we move forward with the selection process?",
"May we have your contact details so we can follow up with the next steps?",
"Would you mind sharing your contact information to proceed with the selection?",
"Can you provide your email address or phone number so we can get in touch?",
"Shall we continue? If so, could you let us know how we can best reach you?",
"To move forward, may we contact you? If yes, could you share your details?",
"Are you comfortable sharing your contact information so we can follow up?",
"Would you like to continue the process by providing your contact details?"
]
CONTACT_DATA_PROCESSED_MESSAGE = "Thank you for allowing us to contact you."
CONTACT_TIME_QUESTION = "When do you prefer us to contact you? You can select some options in the provided form"
CONTACT_TIME_PROCESSED_MESSAGES = [
"Thank you! We've received all the information we need to continue with the selection process. We'll get in touch with you as soon as possible. If you have any questions in the meantime, don't hesitate to ask.",
"Great, we have everything we need to proceed. We'll be in touch shortly. Don't hesitate to ask if anything comes up in the meantime.",
"Thanks for providing your details. We now have all the necessary information and will contact you soon. If you have any further questions, we're here to help.",
"Perfect, your information has been received. We'll move forward and get back to you as soon as we can. Feel free to reach out if you have any questions.",
"All set! Weve received everything needed to move forward. We'll contact you soon. In the meantime, feel free to ask us anything.",
"Thanks again! We've got everything we need to proceed. Expect to hear from us shortly. If anything is unclear, you're welcome to ask further questions.",
"Excellent, we now have all the information required to take the next steps. Well be in touch as soon as possible. If you have any questions, just let us know.",
"We appreciate your input. With all the needed details in place, well reach out shortly to continue the process. Questions are always welcome in the meantime.",
"Thank you for completing this step. We have all the information we need and will contact you as soon as we can. If you have questions, we're happy to assist."
]
NO_FURTHER_QUESTIONS_MESSAGE = "We will not process any further questions."
SUCCESSFUL_ENDING_MESSAGES = [
"Thank you for your application! We'll contact you as soon as possible. If you have any questions in the meantime, dont hesitate to reach out.",
"We appreciate your interest and the information youve shared. We'll be in touch shortly. Feel free to contact us if anything comes up.",
"Thanks again for your application. Well get back to you soon. In the meantime, were happy to answer any questions you may have.",
"Your application has been received. Well reach out to you as soon as we can. If you need anything in the meantime, just let us know.",
"Thank you for completing the first steps. Well follow up as quickly as possible. If you have further questions, we're here to help.",
"Thanks for taking the time to apply! Well contact you shortly. Let us know if you have any questions or need additional information.",
"Weve received everything we need for now — thank you! Well be in touch soon. Dont hesitate to ask if somethings unclear.",
"Were looking forward to speaking with you. Thanks again for your application, and feel free to reach out if you need anything.",
"Thanks! Well contact you soon to discuss the next steps. In the meantime, were happy to answer any further questions.",
"Your application is complete — thank you! Well be reaching out shortly. If youd like to ask anything in the meantime, were available."
]
class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
"""
type: TRAICIE_SELECTION_SPECIALIST
type_version: 1.5
Traicie Selection Specialist Executor class
"""
def __init__(self, tenant_id, specialist_id, session_id, task_id, **kwargs):
self.rag_crew = None
self.determination_crew = None
self.affirmative_answer_crew = None
super().__init__(tenant_id, specialist_id, session_id, task_id)
# Load the Tenant & set language
self.tenant = Tenant.query.get_or_404(tenant_id)
self.specialist_phase = "initial"
self.previous_ai_question = None
self.previous_interview_phase = None
@property
def type(self) -> str:
return "TRAICIE_SELECTION_SPECIALIST"
@property
def type_version(self) -> str:
return "1.5"
def _config_task_agents(self):
self._add_task_agent("advanced_rag_task", "rag_agent")
self._add_task_agent("traicie_determine_interview_mode_task", "traicie_recruiter_agent")
self._add_task_agent("traicie_affirmative_answer_check_task", "traicie_recruiter_agent")
def _config_pydantic_outputs(self):
self._add_pydantic_output("advanced_rag_task", RAGOutput, "rag_output")
self._add_pydantic_output("traicie_determine_interview_mode_task", TraicieInterviewModeOutput, "interview_mode")
self._add_pydantic_output("traicie_affirmative_answer_check_task", TraicieAffirmativeAnswerOutput, "affirmative_answer")
def _config_state_result_relations(self):
self._add_state_result_relation("ai_question")
self._add_state_result_relation("rag_output")
self._add_state_result_relation("ko_criteria_scores")
self._add_state_result_relation("current_ko_criterium")
self._add_state_result_relation("current_ko_criterium_idx")
self._add_state_result_relation("personal_contact_data")
self._add_state_result_relation("contact_time_prefs")
self._add_state_result_relation("interview_phase")
self._add_state_result_relation("interview_mode")
self._add_state_result_relation("affirmative_answer")
def _instantiate_specialist(self):
verbose = self.tuning
rag_agents = [self.rag_agent]
recruitment_agents = [self.traicie_recruiter_agent]
rag_tasks = [self.advanced_rag_task]
determination_tasks = [self.traicie_determine_interview_mode_task]
affirmative_answer_tasks = [self.traicie_affirmative_answer_check_task]
self.rag_crew = EveAICrewAICrew(
self,
"Advanced Rag Crew",
agents=rag_agents,
tasks=rag_tasks,
verbose=verbose,
)
self.determination_crew = EveAICrewAICrew(
self,
"Determination Crew",
agents=recruitment_agents,
tasks=determination_tasks,
verbose=verbose,
)
self.affirmative_answer_crew = EveAICrewAICrew(
self,
"Affirmative Answer Crew",
agents=recruitment_agents,
tasks=affirmative_answer_tasks,
verbose=verbose,
)
self.flow = SelectionFlow(
self,
self.rag_crew,
self.determination_crew,
self.affirmative_answer_crew
)
def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist execution started", {})
self.arguments = arguments
self.formatted_context = formatted_context
self.citations = citations
self.log_tuning("Traicie Selection Specialist inputs", {
"Arguments": arguments.model_dump(),
"Formatted Context": formatted_context,
"Citations": citations,
"History": self._formatted_history
})
if not self._cached_session.interactions:
self.specialist_phase = "initial"
else:
self.specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')
self.previous_ai_question = self._cached_session.interactions[-1].specialist_results.get('ai_question', '')
self.previous_interview_phase = self._cached_session.interactions[-1].specialist_results.get('interview_phase', '')
results = None
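# Dispatch on the phase persisted in the previous interaction's specialist_results;
# each phase handler fills flow.state (answer, next phase, optional form_request) and returns a SelectionResult.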
match self.specialist_phase:
case "initial":
results = self.execute_initial_state()
case "start_selection_procedure":
results = self.execute_start_selection_procedure_state()
case "rag":
results = self.execute_rag_state()
case "ko_question_evaluation":
results = self.execute_ko_question_evaluation()
case "personal_contact_data_preparation":
results = self.execute_personal_contact_data_preparation()
case "personal_contact_data_processing":
results = self.execute_personal_contact_data_processing()
case "contact_time_evaluation":
results = self.execute_contact_time_evaluation_state()
case "no_valid_candidate":
results = self.execute_no_valid_candidate_state(arguments, formatted_context, citations)
self.log_tuning(f"Traicie Selection Specialist execution ended",
{"Results": results.model_dump() if results else "No info"})
return results
def execute_initial_state(self) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist initial_state execution started", {})
interaction_mode = self.arguments.interaction_mode
if not interaction_mode:
interaction_mode = "selection"
welcome_message = self.specialist.configuration.get("welcome_message", "Welcome to our selection process.")
welcome_message = TranslationServices.translate(self.tenant_id, welcome_message, self.arguments.language)
if interaction_mode == "selection":
return self.execute_start_selection_procedure_state(welcome_message)
# We are in orientation mode, so we give a standard message, and move to rag state
start_selection_question = TranslationServices.translate(self.tenant_id,
random.choice(START_SELECTION_QUESTIONS),
self.arguments.language)
self.flow.state.answer = f"{welcome_message}"
self.flow.state.phase = "rag"
self.flow.state.interview_phase = "start_selection_procedure"
self.flow.state.ai_question = welcome_message
results = SelectionResult.create_for_type(self.type, self.type_version)
return results
def execute_start_selection_procedure_state(self, start_message=None) -> SpecialistResult:
initialisation_message = TranslationServices.translate(self.tenant_id,
random.choice(INITIALISATION_MESSAGES),
self.arguments.language)
if start_message:
answer = f"{start_message}\n\n{initialisation_message}"
else:
answer = initialisation_message
ko_questions = self._get_ko_questions()
current_ko_criterium = ko_questions.ko_questions[0].title
current_ko_criterium_idx = 0
ko_form = self._prepare_ko_question_form(ko_questions, current_ko_criterium, self.arguments.language)
self.flow.state.current_ko_criterium = current_ko_criterium
self.flow.state.current_ko_criterium_idx = current_ko_criterium_idx
self.flow.state.ko_criteria_scores = []
self.flow.state.answer = answer
self.flow.state.phase = "ko_question_evaluation"
self.flow.state.form_request = ko_form
results = SelectionResult.create_for_type(self.type, self.type_version)
return results
def execute_ko_question_evaluation(self) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist ko_question_evaluation started", {})
# Check if the form has been returned (it should)
if not self.arguments.form_values:
raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id,
"No form values returned")
ko_questions = self._get_ko_questions()
previous_idx = self.flow.state.current_ko_criterium_idx
previous_ko_question = ko_questions.ko_questions[previous_idx]
# Evaluate KO Criteria
evaluation = "positive"
criterium, answer = next(iter(self.arguments.form_values.items()))
if TranslationServices.translate(self.tenant_id, previous_ko_question.answer_positive, self.arguments.language) != answer:
evaluation = "negative"
score = SelectionKOCriteriumScore(
criterium=criterium,
answer=answer,
score=1 if evaluation == "positive" else 0,
)
self.flow.state.ko_criteria_scores.append(score)
if evaluation == "negative":
answer = TranslationServices.translate(self.tenant_id,
random.choice(KO_CRITERIA_NOT_MET_MESSAGES),
self.arguments.language)
self.flow.state.answer = answer
self.flow.state.phase = "no_valid_candidate"
results = SelectionResult.create_for_type(self.type, self.type_version)
else:
next_idx = previous_idx + 1
if next_idx < len(ko_questions.ko_questions): # There's still a KO criterium to be evaluated
next_ko_criterium = ko_questions.ko_questions[next_idx]
ko_form = self._prepare_ko_question_form(ko_questions, next_ko_criterium.title, self.arguments.language)
next_message = random.choice(KO_CRITERIA_NEXT_MESSAGES)
answer = TranslationServices.translate(self.tenant_id, next_message, self.arguments.language)
self.flow.state.answer = answer
self.flow.state.form_request = ko_form
self.flow.state.current_ko_criterium = next_ko_criterium.title
self.flow.state.current_ko_criterium_idx = next_idx
self.flow.state.phase = "ko_question_evaluation"
else: # All KO Criteria have been met
answer = TranslationServices.translate(self.tenant_id,
random.choice(KO_CRITERIA_MET_MESSAGES),
self.arguments.language)
self.flow.state.ai_question = answer
self.flow.state.answer = answer
self.flow.state.current_ko_criterium = ""
self.flow.state.current_ko_criterium_idx = None
self.flow.state.phase = "rag"
self.flow.state.interview_phase = "personal_contact_data_preparation"
results = SelectionResult.create_for_type(self.type, self.type_version,)
return results
def execute_personal_contact_data_preparation(self) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist personal_contact_data_preparation started", {})
contact_form = cache_manager.specialist_forms_config_cache.get_config("MINIMAL_PERSONAL_CONTACT_FORM", "1.0")
contact_form = TranslationServices.translate_config(self.tenant_id, contact_form, "fields",
self.arguments.language)
answer = TranslationServices.translate(self.tenant_id, CONTACT_DATA_GUIDING_MESSAGE,
self.arguments.language)
self.flow.state.answer = answer
self.flow.state.form_request = contact_form
self.flow.state.phase = "personal_contact_data_processing"
results = SelectionResult.create_for_type(self.type, self.type_version,)
return results
def execute_personal_contact_data_processing(self) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist personal_contact_data_processing started", {})
contact_time_question = TranslationServices.translate(self.tenant_id, CONTACT_TIME_QUESTION, self.arguments.language)
answer = (
f"{TranslationServices.translate(self.tenant_id, CONTACT_DATA_PROCESSED_MESSAGE, self.arguments.language)} "
f"{contact_time_question}")
time_pref_form = cache_manager.specialist_forms_config_cache.get_config("CONTACT_TIME_PREFERENCES_SIMPLE", "1.0")
time_pref_form = TranslationServices.translate_config(self.tenant_id, time_pref_form, "fields",
self.arguments.language)
self.flow.state.answer = answer
self.flow.state.ai_question = contact_time_question
self.flow.state.interview_phase = "contact_time_evaluation"
self.flow.state.phase = "contact_time_evaluation"
self.flow.state.personal_contact_data = self.arguments.form_values
self.flow.state.form_request = time_pref_form
rqc_info = {
"ko_criteria_scores": self.flow.state.ko_criteria_scores,
"personal_contact_data": self.flow.state.personal_contact_data,
}
CapsuleServices.push_capsule_data(self._cached_session.id, "TRAICIE_RQC", "1.0", {}, rqc_info)
results = SelectionResult.create_for_type(self.type, self.type_version,)
return results
def execute_contact_time_evaluation_state(self) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist contact_time_evaluation started", {})
answer = TranslationServices.translate(self.tenant_id,
random.choice(CONTACT_TIME_PROCESSED_MESSAGES),
self.arguments.language)
self.flow.state.answer = answer
self.flow.state.phase = "rag"
self.flow.state.interview_phase = "candidate_selected"
self.flow.state.contact_time_prefs = self.arguments.form_values
rqc_info = {
"ko_criteria_scores": self.flow.state.ko_criteria_scores,
"personal_contact_data": self.flow.state.personal_contact_data,
"contact_time_prefs": self.flow.state.contact_time_prefs,
}
CapsuleServices.push_capsule_data(self._cached_session.id, "TRAICIE_RQC", "1.0", {}, rqc_info)
results = SelectionResult.create_for_type(self.type, self.type_version,)
return results
def execute_no_valid_candidate_state(self, arguments: SpecialistArguments, formatted_context, citations) \
-> SpecialistResult:
self.log_tuning("Traicie Selection Specialist no_valid_candidate started", {})
answer = TranslationServices.translate(self.tenant_id,
random.choice(KO_CRITERIA_NOT_MET_MESSAGES),
arguments.language)
self.flow.state.answer = answer
self.flow.state.phase = "no_valid_candidate"
results = SelectionResult.create_for_type(self.type, self.type_version,)
return results
def execute_rag_state(self) -> None | SpecialistResult:
self.log_tuning("Traicie Selection Specialist rag_state started", {})
# Prepare & Execute Selection Flow
formatted_context, citations = self._retrieve_context(self.arguments)
self.flow.state.citations = citations
tone_of_voice = self.specialist.configuration.get('tone_of_voice', 'Professional & Neutral')
tone_of_voice_context = get_tone_of_voice_context(tone_of_voice)
language_level = self.specialist.configuration.get('language_level', 'Standard')
language_level_context = get_language_level_context(language_level)
flow_inputs = {
"language": self.arguments.language,
"question": self.arguments.question,
"context": formatted_context,
"history": self.formatted_history,
"name": self.specialist.configuration.get('name', ''),
"tone_of_voice": tone_of_voice,
"tone_of_voice_context": tone_of_voice_context,
"language_level": language_level,
"language_level_context": language_level_context,
}
flow_results = self.flow.kickoff(inputs=flow_inputs)
# Handle the results - stored in the state object
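# interview_mode is set by the determination crew: "RAG" means the user asked a free-form question
# (answered via rag_output), while "CHECK" treats the reply as a yes/no answer to the previous
# interview question (affirmative_answer).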
if self.flow.state.interview_mode == "RAG":
# In case of RAG mode, we get a rag_output
if self.flow.state.rag_output:
if self.flow.state.rag_output.insufficient_info:
answer = TranslationServices.translate(self.tenant_id,
random.choice(INSUFFICIENT_INFORMATION_MESSAGES),
self.arguments.language)
else:
answer = self.flow.state.rag_output.answer
else:
current_app.logger.error("No RAG output found in the state object!")
answer = TranslationServices.translate(self.tenant_id,
random.choice(INSUFFICIENT_INFORMATION_MESSAGES),
self.arguments.language)
interview_question = self._get_question_for_interview_phase()
self.flow.state.answer = f"{self.flow.state.rag_output.answer}\n{interview_question}"
self.flow.state.phase = "rag"
self.flow.state.interview_phase = self.previous_interview_phase
else: # self.flow.state.interview_mode == "CHECK"
if self.previous_interview_phase == "candidate_selected": # We blijven in RAG mode
interview_question = self._get_question_for_interview_phase()
self.flow.state.answer = interview_question
self.flow.state.phase = "rag"
self.flow.state.interview_phase = "candidate_selected"
else:
if self.flow.state.affirmative_answer:
return self._execute_next_interview_phase()
else:
self.flow.state.answer = self._respond_to_negative_answer()
self.flow.state.phase = "rag"
self.flow.state.interview_phase = self.previous_interview_phase
results = SelectionResult.create_for_type(self.type, self.type_version,)
return results
def _get_ko_questions(self) -> KOQuestions:
ko_questions_asset = db.session.query(EveAIAsset).filter(
EveAIAsset.type == "TRAICIE_KO_CRITERIA_QUESTIONS",
EveAIAsset.type_version == "1.0.0",
EveAIAsset.configuration.is_not(None),
EveAIAsset.configuration.has_key('specialist_id'),
EveAIAsset.configuration['specialist_id'].astext.cast(db.Integer) == self.specialist_id
).first()
if not ko_questions_asset:
raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id,
"No KO criteria questions found")
# Register Asset Usage
prompt_tokens = ko_questions_asset.prompt_tokens
completion_tokens = ko_questions_asset.completion_tokens
total_tokens = prompt_tokens + completion_tokens
metrics = {
'total_tokens': total_tokens,
'prompt_tokens': prompt_tokens,
'completion_tokens': completion_tokens,
'time_elapsed': 0,
'interaction_type': 'ASSET',
}
current_event.log_llm_metrics(metrics)
ko_questions_data = minio_client.download_asset_file(self.tenant_id, ko_questions_asset.bucket_name,
ko_questions_asset.object_name)
ko_questions = KOQuestions.from_json(ko_questions_data)
return ko_questions
def _prepare_ko_question_form(self, ko_questions: KOQuestions, current_ko_criterium: str, language: str) \
-> Dict[str, Any]:
fields = {}
ko_question = ko_questions.get_by_title(current_ko_criterium)
fields[ko_question.title] = {
"name": ko_question.title,
"description": ko_question.title,
"context": ko_question.question,
"type": "options",
"required": True,
"allowed_values": [ko_question.answer_positive, ko_question.answer_negative]
}
ko_form = {
"type": "KO_CRITERIA_FORM",
"version": "1.0.0",
"name": f"Starter Question: {current_ko_criterium}",
"icon": "verified",
"fields": fields,
}
ko_form = TranslationServices.translate_config(self.tenant_id, ko_form, "fields", language)
return ko_form
def _get_question_for_interview_phase(self) -> str:
question = None
match self.previous_interview_phase:
case "start_selection_procedure":
question = random.choice(START_SELECTION_QUESTIONS)
case "personal_contact_data_preparation":
question = random.choice(CONTACT_DATA_QUESTIONS)
case "candidate_selected":
question = random.choice(SUCCESSFUL_ENDING_MESSAGES)
translated_question = TranslationServices.translate(self.tenant_id, question, self.arguments.language)
return translated_question
def _respond_to_negative_answer(self) -> str:
question = None
match self.previous_interview_phase:
case "start_selection_procedure":
question = random.choice(TRY_TO_START_SELECTION_QUESTIONS)
case "personal_contact_data_preparation":
question = random.choice(NO_CONTACT_DATA_QUESTIONS)
translated_question = TranslationServices.translate(self.tenant_id, question, self.arguments.language)
return translated_question
def _execute_next_interview_phase(self) -> SpecialistResult | None:
match self.previous_interview_phase:
case "start_selection_procedure":
return self.execute_start_selection_procedure_state()
case "personal_contact_data_preparation":
return self.execute_personal_contact_data_preparation()
return None
class SelectionKOCriteriumScore(BaseModel):
criterium: Optional[str] = Field(None, alias="criterium")
answer: Optional[str] = Field(None, alias="answer")
score: Optional[int] = Field(None, alias="score")
class PersonalContactData(BaseModel):
name: str = Field(..., description="Your name", alias="name")
email: EmailStr = Field(..., description="Your Email Address", alias="email")
phone: str = Field(..., description="Your Phone Number", alias="phone")
address: Optional[str] = Field(None, description="Your Address", alias="address")
zip: Optional[str] = Field(None, description="Postal Code", alias="zip")
city: Optional[str] = Field(None, description="City", alias="city")
country: Optional[str] = Field(None, description="Country", alias="country")
consent: bool = Field(..., description="Consent", alias="consent")
class ContactTimePreferences(BaseModel):
early: Optional[bool] = Field(None, description="Early", alias="early")
late_morning: Optional[bool] = Field(None, description="Late Morning", alias="late_morning")
afternoon: Optional[bool] = Field(None, description="Afternoon", alias="afternoon")
evening: Optional[bool] = Field(None, description="Evening", alias="evening")
other: Optional[str] = Field(None, description="Other", alias="other")
class SelectionInput(BaseModel):
# RAG elements
language: Optional[str] = Field(None, alias="language")
question: Optional[str] = Field(None, alias="question")
context: Optional[str] = Field(None, alias="context")
citations: Optional[List[int]] = Field(None, alias="citations")
history: Optional[str] = Field(None, alias="history")
name: Optional[str] = Field(None, alias="name")
# Selection elements
region: Optional[str] = Field(None, alias="region")
working_schedule: Optional[str] = Field(None, alias="working_schedule")
start_date: Optional[date] = Field(None, alias="vacancy_text")
interaction_mode: Optional[str] = Field(None, alias="interaction_mode")
tone_of_voice: Optional[str] = Field(None, alias="tone_of_voice")
tone_of_voice_context: Optional[str] = Field(None, alias="tone_of_voice_context")
language_level: Optional[str] = Field(None, alias="language_level")
language_level_context: Optional[str] = Field(None, alias="language_level_context")
ko_criteria: Optional[List[Dict[str, str]]] = Field(None, alias="ko_criteria")
field_values: Optional[Dict[str, Any]] = Field(None, alias="field_values")
class SelectionFlowState(EveAIFlowState):
"""Flow state for RAG specialist that automatically updates from task outputs"""
input: Optional[SelectionInput] = None
ai_question: Optional[str] = None
rag_output: Optional[RAGOutput] = None
current_ko_criterium: Optional[str] = None
current_ko_criterium_idx: Optional[int] = None
ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = None
personal_contact_data: Optional[PersonalContactData] = None
contact_time_prefs: Optional[ContactTimePreferences] = None
citations: Optional[List[Dict[str, Any]]] = None
interview_phase: Optional[str] = None
interview_mode: Optional[str] = None
affirmative_answer: Optional[bool] = None
class SelectionResult(SpecialistResult):
ai_question: Optional[str] = None
rag_output: Optional[RAGOutput] = Field(None, alias="rag_output")
ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores")
personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data")
contact_time_prefs: Optional[ContactTimePreferences] = None
interview_phase: Optional[str] = None
interview_mode: Optional[str] = None
affirmative_answer: Optional[bool] = None
class SelectionFlow(EveAICrewAIFlow[SelectionFlowState]):
def __init__(self,
specialist_executor: CrewAIBaseSpecialistExecutor,
rag_crew: EveAICrewAICrew,
determination_crew: EveAICrewAICrew,
affirmative_answer_crew: EveAICrewAICrew,
**kwargs):
super().__init__(specialist_executor, "Selection Specialist Flow", **kwargs)
self.specialist_executor = specialist_executor
self.rag_crew = rag_crew
self.determination_crew = determination_crew
self.affirmative_answer_crew = affirmative_answer_crew
self.exception_raised = False
@start()
def process_inputs(self):
return ""
@listen(process_inputs)
async def execute_determination(self):
try:
inputs = self.state.input.model_dump()
crew_output = await self.determination_crew.kickoff_async(inputs=inputs)
self.specialist_executor.log_tuning("Determination Crew Output", crew_output.model_dump())
output_pydantic = crew_output.pydantic
if not output_pydantic:
raw_json = json.loads(crew_output.raw)
output_pydantic = TraicieInterviewModeOutput.model_validate(raw_json)
self.state.interview_mode = output_pydantic.mode
return output_pydantic
except Exception as e:
current_app.logger.error(f"Determination Crew Error: {e}")
self.exception_raised = True
raise e
@router(execute_determination)
def interview_mode_routing(self):
# interview mode can be RAG or CHECK
return self.state.interview_mode
@listen("RAG")
async def execute_rag(self):
try:
inputs = self.state.input.model_dump()
crew_output = await self.rag_crew.kickoff_async(inputs=inputs)
self.specialist_executor.log_tuning("Advanced RAG Crew Output", crew_output.model_dump())
output_pydantic = crew_output.pydantic
if not output_pydantic:
raw_json = json.loads(crew_output.raw)
output_pydantic = RAGOutput.model_validate(raw_json)
self.state.rag_output = output_pydantic
return output_pydantic
except Exception as e:
current_app.logger.error(f"CREW rag_crew Error: {str(e)}")
self.exception_raised = True
raise e
@listen("CHECK")
async def check_affirmative_answer(self):
try:
inputs = self.state.input.model_dump()
crew_output = await self.affirmative_answer_crew.kickoff_async(inputs=inputs)
self.specialist_executor.log_tuning("Traicie Check Affirmative Answer Crew Output", crew_output.model_dump())
output_pydantic = crew_output.pydantic
if not output_pydantic:
raw_json = json.loads(crew_output.raw)
output_pydantic = TraicieAffirmativeAnswerOutput.model_validate(raw_json)
self.state.affirmative_answer = output_pydantic.affirmative
return output_pydantic
except Exception as e:
current_app.logger.error(f"CREW rag_crew Error: {str(e)}")
self.exception_raised = True
raise e
async def kickoff_async(self, inputs=None):
self.state.input = SelectionInput.model_validate(inputs)
result = await super().kickoff_async(inputs)
self.specialist_executor.log_tuning("Specialist Executor Output", self.state.model_dump())
return self.state