- Full implementation of the Traicie Selection Specialist - VA version
- Improvements to CrewAI specialists and specialists in general
- Addition of reusable components (HumanAnswerServices) to check or extract answers to questions from the full human message
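The diff below calls the new HumanAnswerServices helpers (check_affirmative_answer, check_additional_information, get_answer_to_question) with a (tenant_id, question, human_message, language) argument order taken from the call sites in this commit. As a rough, hedged sketch of what such a reusable component could look like (the class name HumanAnswerServicesSketch and the keyword-based logic are assumptions for illustration only, standing in for whatever the real service does, e.g. an LLM call):

# Hypothetical sketch, not the actual HumanAnswerServices implementation from this commit.
# It only mirrors the call signatures used in the diff below.
from typing import Optional


class HumanAnswerServicesSketch:
    AFFIRMATIVE = {"yes", "yeah", "sure", "ok", "okay", "ja", "oui"}

    @staticmethod
    def check_affirmative_answer(tenant_id: int, question: str,
                                 human_message: Optional[str], language: str) -> bool:
        """Return True when the human message answers the given question affirmatively."""
        if not human_message:
            return False
        words = {w.strip(".,!?").lower() for w in human_message.split()}
        return bool(words & HumanAnswerServicesSketch.AFFIRMATIVE)

    @staticmethod
    def get_answer_to_question(tenant_id: int, question: str,
                               human_message: Optional[str], language: str) -> str:
        """Extract the part of the human message that answers the question."""
        if not human_message or not human_message.strip():
            return "No answer provided"  # sentinel also checked in the diff below
        return human_message.strip()

    @staticmethod
    def check_additional_information(tenant_id: int, question: str,
                                     human_message: Optional[str], language: str) -> bool:
        """Return True when the message contains content beyond a plain yes/no reply."""
        if not human_message:
            return False
        return len(human_message.split()) > 3

In the diff, the real HumanAnswerServices is invoked with exactly these signatures, for example HumanAnswerServices.check_affirmative_answer(self.tenant_id, CONTACT_DATA_QUESTION, arguments.question, arguments.language).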
@@ -131,6 +131,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
                                  f"corresponding to CEFR level {selected_language_level['cefr_level']}")

        flow_inputs = {
            'name': "Evie",
            'tone_of_voice': tone_of_voice,
            'tone_of_voice_context': tone_of_voice_context,
            'language_level': language_level,
@@ -243,6 +244,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):


class KODefInput(BaseModel):
    name: Optional[str] = Field(None, alias="name")
    tone_of_voice: Optional[str] = Field(None, alias="tone_of_voice")
    tone_of_voice_context: Optional[str] = Field(None, alias="tone_of_voice_context")
    language_level: Optional[str] = Field(None, alias="language_level")

@@ -19,7 +19,7 @@ from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew,
from common.services.interaction.specialist_services import SpecialistServices

NEW_SPECIALIST_TYPE = "TRAICIE_SELECTION_SPECIALIST"
NEW_SPECIALIST_TYPE_VERSION = "1.3"
NEW_SPECIALIST_TYPE_VERSION = "1.4"


class SpecialistExecutor(CrewAIBaseSpecialistExecutor):

@@ -1,29 +1,50 @@
import asyncio
import json
from os import wait
from typing import Optional, List, Dict, Any
from datetime import date
from time import sleep
from crewai.flow.flow import start, listen, and_
from typing import Optional, List, Dict, Any

from crewai.flow.flow import start, listen
from flask import current_app
from pydantic import BaseModel, Field, EmailStr
from sqlalchemy.exc import SQLAlchemyError

from common.extensions import db
from common.extensions import cache_manager, db, minio_client
from common.models.interaction import EveAIAsset
from common.models.user import Tenant
from common.models.interaction import Specialist
from common.services.utils.human_answer_services import HumanAnswerServices
from common.services.utils.translation_services import TranslationServices
from eveai_chat_workers.outputs.globals.basic_types.list_item import ListItem
from eveai_chat_workers.outputs.traicie.knockout_questions.knockout_questions_v1_0 import KOQuestions, KOQuestion
from eveai_chat_workers.specialists.crewai_base_specialist import CrewAIBaseSpecialistExecutor
from eveai_chat_workers.specialists.specialist_typing import SpecialistResult, SpecialistArguments
from eveai_chat_workers.outputs.traicie.competencies.competencies_v1_1 import Competencies
from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew, EveAICrewAIFlow, EveAIFlowState
from common.services.interaction.specialist_services import SpecialistServices
from common.extensions import cache_manager
from common.utils.eveai_exceptions import EveAISpecialistExecutionError
from eveai_chat_workers.definitions.language_level.language_level_v1_0 import LANGUAGE_LEVEL
from eveai_chat_workers.definitions.tone_of_voice.tone_of_voice_v1_0 import TONE_OF_VOICE
from common.utils.eveai_exceptions import EveAISpecialistExecutionError
from eveai_chat_workers.outputs.globals.basic_types.list_item import ListItem
from eveai_chat_workers.outputs.globals.rag.rag_v1_0 import RAGOutput
from eveai_chat_workers.outputs.traicie.knockout_questions.knockout_questions_v1_0 import KOQuestion, KOQuestions
from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew, EveAICrewAIFlow, EveAIFlowState
from eveai_chat_workers.specialists.crewai_base_specialist import CrewAIBaseSpecialistExecutor
from eveai_chat_workers.specialists.specialist_typing import SpecialistResult, SpecialistArguments

INITIALISATION_MESSAGE = "Let's start the selection process by asking you a few important questions."
START_SELECTION_QUESTION = "Do you want to start the selection procedure?"
INSUFFICIENT_INFORMATION_MESSAGE = (
    "We do not have the necessary information to provide you with the requested answers. "
    "Please accept our apologies. You can ask other questions or proceed with the "
    "selection process.")
KO_CRITERIA_NOT_MET_MESSAGE = ("Thank you for answering our questions! We processed your answers. Unfortunately, you do "
                               "not comply with the minimum requirements for this job. Therefore, we stop this "
                               "selection procedure.")
KO_CRITERIA_MET_MESSAGE = "We processed your answers with a positive result."
RQC_MESSAGE = "You are well suited for this job."
CONTACT_DATA_QUESTION = ("Are you willing to provide us with your contact data, so we can contact you to continue "
                         "the selection process?")
NO_CONTACT_DATA_QUESTION = ("We are sorry to hear that. The only way to proceed with the selection process is "
                            "to provide us with your contact data. Do you want to provide us with your contact data? "
                            "If not, we thank you, and we'll end the selection process.")
CONTACT_DATA_PROCESSED_MESSAGE = "We successfully processed your contact data."
CONTACT_TIME_QUESTION = "When do you prefer us to contact you? Provide us with some preferred weekdays and times!"
NO_CONTACT_TIME_MESSAGE = ("We could not process your preferred contact time. Can you please provide us with your "
                           "preferred contact time?")
CONTACT_TIME_PROCESSED_MESSAGE = ("We successfully processed your preferred contact time. We will contact you as soon "
                                  "as possible.")
NO_FURTHER_QUESTIONS_MESSAGE = "We do not process further questions."
SUCCESSFUL_ENDING_MESSAGE = "Thank you for your application. We will contact you as soon as possible!"


class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
@@ -47,37 +68,39 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):

    @property
    def type_version(self) -> str:
        return "1.3"
        return "1.4"

    def _config_task_agents(self):
        self._add_task_agent("traicie_ko_criteria_interview_definition_task", "traicie_recruiter_agent")
        self._add_task_agent("rag_task", "rag_agent")

    def _config_pydantic_outputs(self):
        self._add_pydantic_output("traicie_ko_criteria_interview_definition_task", KOQuestions, "ko_questions")
        self._add_pydantic_output("rag_task", RAGOutput, "rag_output")

    def _config_state_result_relations(self):
        self._add_state_result_relation("rag_output")
        self._add_state_result_relation("ko_criteria_questions")
        self._add_state_result_relation("ko_criteria_scores")
        self._add_state_result_relation("ko_criteria_answers")
        self._add_state_result_relation("competency_questions")
        self._add_state_result_relation("competency_scores")
        self._add_state_result_relation("personal_contact_data")
        self._add_state_result_relation("contact_time")

    def _instantiate_specialist(self):
        verbose = self.tuning

        ko_def_agents = [self.traicie_recruiter_agent]
        ko_def_tasks = [self.traicie_ko_criteria_interview_definition_task]
        self.ko_def_crew = EveAICrewAICrew(
        rag_agents = [self.rag_agent]
        rag_tasks = [self.rag_task]
        self.rag_crew = EveAICrewAICrew(
            self,
            "KO Criteria Interview Definition Crew",
            agents=ko_def_agents,
            tasks=ko_def_tasks,
            "Rag Crew",
            agents=rag_agents,
            tasks=rag_tasks,
            verbose=verbose,
        )

        self.flow = SelectionFlow(
            self,
            self.ko_def_crew
            self.rag_crew,
        )

    def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
@@ -94,73 +117,62 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
        specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')

        results = None

        current_app.logger.debug(f"Specialist phase: {specialist_phase}")
        match specialist_phase:
            case "initial":
                results = self.execute_initial_state(arguments, formatted_context, citations)
            case "start_selection_procedure":
                results = self.execute_start_selection_procedure_state(arguments, formatted_context, citations)
            case "rag":
                results = self.execute_rag_state(arguments, formatted_context, citations)
            case "ko_question_evaluation":
                results = self.execute_ko_question_evaluation(arguments, formatted_context, citations)
            case "personal_contact_data":
                results = self.execute_personal_contact_data(arguments, formatted_context, citations)
            case "personal_contact_data_preparation":
                results = self.execute_personal_contact_data_preparation(arguments, formatted_context, citations)
            case "personal_contact_data_processing":
                results = self.execute_personal_contact_data_processing(arguments, formatted_context, citations)
            case "contact_time_evaluation":
                results = self.execute_contact_time_evaluation_state(arguments, formatted_context, citations)
            case "no_valid_candidate":
                results = self.execute_no_valid_candidate(arguments, formatted_context, citations)
                results = self.execute_no_valid_candidate_state(arguments, formatted_context, citations)
            case "candidate_selected":
                results = self.execute_candidate_selected(arguments, formatted_context, citations)
                results = self.execute_candidate_selected_state(arguments, formatted_context, citations)

        self.log_tuning(f"Traicie Selection Specialist execution ended", {"Results": results.model_dump() if results else "No info"})
        self.log_tuning(f"Traicie Selection Specialist execution ended",
                        {"Results": results.model_dump() if results else "No info"})

        return results

    def execute_initial_state(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
        self.log_tuning("Traicie Selection Specialist initial_state execution started", {})

        current_app.logger.debug(f"Specialist Competencies:\n{self.specialist.configuration.get('competencies', [])}")
        interaction_mode = arguments.interaction_mode
        if not interaction_mode:
            interaction_mode = "selection"
        current_app.logger.debug(f"Interaction mode: {interaction_mode}")

        ko_competencies = []
        for competency in self.specialist.configuration.get("competencies", []):
            if competency["is_knockout"] is True and competency["assess"] is True:
                current_app.logger.debug(f"Assessable Knockout competency: {competency}")
                ko_competencies.append({"title": competency["title"], "description": competency["description"]})
        welcome_message = self.specialist.configuration.get("welcome_message", "Welcome to our selection process.")
        welcome_message = TranslationServices.translate(self.tenant_id, welcome_message, arguments.language)

        tone_of_voice = self.specialist.configuration.get('tone_of_voice', 'Professional & Neutral')
        selected_tone_of_voice = next(
            (item for item in TONE_OF_VOICE if item["name"] == tone_of_voice),
            None  # fallback if not found
        )
        current_app.logger.debug(f"Selected tone of voice: {selected_tone_of_voice}")
        tone_of_voice_context = f"{selected_tone_of_voice['description']}"
        if interaction_mode == "selection":
            return self.execute_start_selection_procedure_state(arguments, formatted_context, citations,
                                                                welcome_message)
        else:  # We are in orientation mode, so we perform standard rag
            return self.execute_rag_state(arguments, formatted_context, citations, welcome_message)

        language_level = self.specialist.configuration.get('language_level', 'Standard')
        selected_language_level = next(
            (item for item in LANGUAGE_LEVEL if item["name"] == language_level),
            None
        )
        current_app.logger.debug(f"Selected language level: {selected_language_level}")
        language_level_context = (f"{selected_language_level['description']}, "
                                  f"corresponding to CEFR level {selected_language_level['cefr_level']}")
    def execute_start_selection_procedure_state(self, arguments: SpecialistArguments, formatted_context, citations,
                                                start_message=None) -> SpecialistResult:

        flow_inputs = {
            "region": arguments.region,
            "working_schedule": arguments.working_schedule,
            "start_date": arguments.start_date,
            "language": arguments.language,
            "interaction_mode": arguments.interaction_mode,
            'tone_of_voice': tone_of_voice,
            'tone_of_voice_context': tone_of_voice_context,
            'language_level': language_level,
            'language_level_context': language_level_context,
            'ko_criteria': ko_competencies,
        }

        flow_results = self.flow.kickoff(inputs=flow_inputs)

        current_app.logger.debug(f"Flow results: {flow_results}")

        current_app.logger.debug(f"Flow state: {self.flow.state}")
        answer = ""
        if start_message:
            initialisation_message = TranslationServices.translate(self.tenant_id, INITIALISATION_MESSAGE,
                                                                   arguments.language)
            answer = f"{start_message}\n\n{initialisation_message}"

        ko_questions = self._get_ko_questions()
        fields = {}
        for ko_question in self.flow.state.ko_criteria_questions:
        for ko_question in ko_questions.ko_questions:
            current_app.logger.debug(f"KO Question: {ko_question}")
            fields[ko_question.title] = {
                "name": ko_question.title,
                "description": ko_question.title,
@@ -178,105 +190,259 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
            "fields": fields,
        }

        answer = f"Let's start our selection process by asking you a few important questions."
        rag_answer = self._check_and_execute_rag(arguments, formatted_context, citations)
        if rag_answer:
            if answer:
                answer = f"{answer}\n\n{rag_answer.answer}"
            else:
                answer = rag_answer.answer

        if arguments.language != 'en':
            TranslationServices.translate_config(self.tenant_id, ko_form, "fields", arguments.language)
            TranslationServices.translate(self.tenant_id, answer, arguments.language)
        self.flow.state.answer = answer
        self.flow.state.phase = "ko_question_evaluation"
        self.flow.state.form_request = ko_form


        results = SpecialistResult.create_for_type(self.type, self.type_version,
                                                   answer=answer,
                                                   form_request=ko_form,
                                                   phase="ko_question_evaluation")
        results = SelectionResult.create_for_type(self.type, self.type_version)

        return results

    def execute_ko_question_evaluation(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
    def execute_ko_question_evaluation(self, arguments: SpecialistArguments, formatted_context, citations) \
            -> SpecialistResult:
        self.log_tuning("Traicie Selection Specialist ko_question_evaluation started", {})

        # Check if the form has been returned (it should)
        if not arguments.form_values:
            raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id, "No form values returned")
            raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id,
                                                "No form values returned")
        current_app.logger.debug(f"Form values: {arguments.form_values}")

        # Load the previous KO Questions
        previous_ko_questions = self.flow.state.ko_criteria_questions
        previous_ko_questions = self._get_ko_questions().ko_questions
        current_app.logger.debug(f"Previous KO Questions: {previous_ko_questions}")

        # Evaluate KO Criteria
        evaluation = "positive"
        for criterium, answer in arguments.form_values.items():
            for qa in previous_ko_questions:
                if qa.get("title") == criterium:
                    if qa.get("answer_positive") != answer:
                if qa.title == criterium:
                    if qa.answer_positive != answer:
                        evaluation = "negative"
                        break
            if evaluation == "negative":
                break

        self.flow.state.ko_criteria_answers = arguments.form_values

        if evaluation == "negative":
            answer = (f"We hebben de antwoorden op onze eerste vragen verwerkt. Je voldoet jammer genoeg niet aan de "
                      f"minimale vereisten voor deze job.")
            if arguments.language != 'nl':
                answer = TranslationServices.translate(answer, arguments.language)
            answer = TranslationServices.translate(self.tenant_id, KO_CRITERIA_NOT_MET_MESSAGE, arguments.language)

            results = SpecialistResult.create_for_type(self.type, self.type_version,
                                                       answer=answer,
                                                       form_request=None,
                                                       phase="no_valid_candidate")
            self.flow.state.answer = answer
            self.flow.state.phase = "no_valid_candidate"

            results = SelectionResult.create_for_type(self.type, self.type_version)
        else:
            answer = (f"We hebben de antwoorden op de KO criteria verwerkt. Je bent een geschikte kandidaat. "
                      f"Ben je bereid je contactgegevens door te geven, zodat we je kunnen contacteren voor een verder "
                      f"gesprek?")
            # Check if answers to questions are positive
            answer = TranslationServices.translate(self.tenant_id, KO_CRITERIA_MET_MESSAGE, arguments.language)
            rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
            if rag_output:
                answer = f"{answer}\n\n{rag_output.answer}"
            answer = (f"{answer}\n\n"
                      f"{TranslationServices.translate(self.tenant_id, RQC_MESSAGE, arguments.language)} "
                      f"{TranslationServices.translate(self.tenant_id, CONTACT_DATA_QUESTION, arguments.language)}")

            self.flow.state.answer = answer
            self.flow.state.phase = "personal_contact_data_preparation"

            results = SelectionResult.create_for_type(self.type, self.type_version,)

        return results

    def execute_personal_contact_data_preparation(self, arguments: SpecialistArguments, formatted_context, citations) \
            -> SpecialistResult:
        self.log_tuning("Traicie Selection Specialist personal_contact_data_preparation started", {})

        if HumanAnswerServices.check_affirmative_answer(self.tenant_id, CONTACT_DATA_QUESTION,
                                                        arguments.question, arguments.language):
            contact_form = cache_manager.specialist_forms_config_cache.get_config("PERSONAL_CONTACT_FORM", "1.0")
            if arguments.language != 'nl':
                answer = TranslationServices.translate(answer, arguments.language)
            if arguments.language != 'en':
                contact_form = TranslationServices.translate_config(self.tenant_id, contact_form, "fields", arguments.language)
            results = SpecialistResult.create_for_type(self.type, self.type_version,
                                                       answer=answer,
                                                       form_request=contact_form,
                                                       phase="personal_contact_data")
                contact_form = TranslationServices.translate_config(self.tenant_id, contact_form, "fields",
                                                                    arguments.language)
            rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
            if rag_output:
                answer = f"{rag_output.answer}"
            else:
                answer = ""

            self.flow.state.answer = answer
            self.flow.state.form_request = contact_form
            self.flow.state.phase = "personal_contact_data_processing"

            results = SelectionResult.create_for_type(self.type, self.type_version,)
        else:
            answer = TranslationServices.translate(self.tenant_id, NO_CONTACT_DATA_QUESTION, arguments.language)

            self.flow.state.answer = answer
            self.flow.state.phase = "personal_contact_data_preparation"

            results = SelectionResult.create_for_type(self.type, self.type_version,)

        return results

    def execute_personal_contact_data(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
        self.log_tuning("Traicie Selection Specialist personal_contact_data started", {})
    def execute_personal_contact_data_processing(self, arguments: SpecialistArguments, formatted_context, citations) \
            -> SpecialistResult:
        self.log_tuning("Traicie Selection Specialist personal_contact_data_processing started", {})
        answer = (
            f"{TranslationServices.translate(self.tenant_id, CONTACT_DATA_PROCESSED_MESSAGE, arguments.language)}\n"
            f"{TranslationServices.translate(self.tenant_id, CONTACT_TIME_QUESTION, arguments.language)}")

        results = SpecialistResult.create_for_type(self.type, self.type_version,
                                                   answer=f"We hebben de contactgegevens verwerkt. We nemen zo snel mogelijk contact met je op.",
                                                   phase="candidate_selected")
        rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
        if rag_output:
            answer = f"{answer}\n\n{rag_output.answer}"

        self.flow.state.answer = answer
        self.flow.state.phase = "contact_time_evaluation"
        self.flow.state.personal_contact_data = arguments.form_values

        results = SelectionResult.create_for_type(self.type, self.type_version,)
        return results

    def execute_no_valid_candidate(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
    def execute_contact_time_evaluation_state(self, arguments: SpecialistArguments, formatted_context, citations) \
            -> SpecialistResult:
        self.log_tuning("Traicie Selection Specialist contact_time_evaluation started", {})
        contact_time_answer = HumanAnswerServices.get_answer_to_question(self.tenant_id, CONTACT_TIME_QUESTION,
                                                                         arguments.question, arguments.language)

        rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
        if contact_time_answer == "No answer provided":
            answer = TranslationServices.translate(self.tenant_id, NO_CONTACT_TIME_MESSAGE, arguments.language)
            if rag_output:
                answer = f"{answer}\n\n{rag_output.answer}"

            self.flow.state.answer = answer
            self.flow.state.phase = "contact_time_evaluation"

            results = SelectionResult.create_for_type(self.type, self.type_version,)
        else:
            answer = TranslationServices.translate(self.tenant_id, CONTACT_TIME_PROCESSED_MESSAGE, arguments.language)
            if rag_output:
                answer = f"{answer}\n\n{rag_output.answer}"

            self.flow.state.answer = answer
            self.flow.state.phase = "candidate_selected"
            self.flow.state.contact_time = contact_time_answer

            results = SelectionResult.create_for_type(self.type, self.type_version,)

        return results

    def execute_no_valid_candidate_state(self, arguments: SpecialistArguments, formatted_context, citations) \
            -> SpecialistResult:
        self.log_tuning("Traicie Selection Specialist no_valid_candidate started", {})
        results = SpecialistResult.create_for_type(self.type, self.type_version,
                                                   answer=f"Je voldoet jammer genoeg niet aan de minimale vereisten voor deze job. Maar solliciteer gerust voor één van onze andere jobs.",
                                                   phase="no_valid_candidate")
        answer = (f"{TranslationServices.translate(self.tenant_id, KO_CRITERIA_NOT_MET_MESSAGE, arguments.language)}\n"
                  f"{TranslationServices.translate(self.tenant_id, NO_FURTHER_QUESTIONS_MESSAGE, arguments.language)}\n")

    def execute_candidate_selected(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
        self.log_tuning("Traicie Selection Specialist candidate_selected started", {})
        results = SpecialistResult.create_for_type(self.type, self.type_version,
                                                   answer=f"We hebben je contactgegegevens verwerkt. We nemen zo snel mogelijk contact met je op.",
                                                   phase="candidate_selected")
        self.flow.state.answer = answer
        self.flow.state.phase = "no_valid_candidate"

        results = SelectionResult.create_for_type(self.type, self.type_version,)
        return results

    def execute_candidate_selected_state(self, arguments: SpecialistArguments, formatted_context, citations) \
            -> SpecialistResult:
        self.log_tuning("Traicie Selection Specialist candidate_selected started", {})
        answer = TranslationServices.translate(self.tenant_id, SUCCESSFUL_ENDING_MESSAGE, arguments.language)

class SelectionInput(BaseModel):
    region: str = Field(..., alias="region")
    working_schedule: Optional[str] = Field(..., alias="working_schedule")
    start_date: Optional[date] = Field(None, alias="vacancy_text")
    language: Optional[str] = Field(None, alias="language")
    interaction_mode: Optional[str] = Field(None, alias="interaction_mode")
    tone_of_voice: Optional[str] = Field(None, alias="tone_of_voice")
    tone_of_voice_context: Optional[str] = Field(None, alias="tone_of_voice_context")
    language_level: Optional[str] = Field(None, alias="language_level")
    language_level_context: Optional[str] = Field(None, alias="language_level_context")
    ko_criteria: Optional[List[Dict[str, str]]] = Field(None, alias="ko_criteria")
    question: Optional[str] = Field(None, alias="question")
    field_values: Optional[Dict[str, Any]] = Field(None, alias="field_values")
        self.flow.state.answer = answer
        self.flow.state.phase = "candidate_selected"

        results = SelectionResult.create_for_type(self.type, self.type_version,)
        return results

    def execute_rag_state(self, arguments: SpecialistArguments, formatted_context, citations, welcome_message=None) \
            -> SpecialistResult:
        self.log_tuning("Traicie Selection Specialist rag_state started", {})

        start_selection_question = TranslationServices.translate(self.tenant_id, START_SELECTION_QUESTION,
                                                                  arguments.language)
        if welcome_message:
            answer = f"{welcome_message}\n\n{start_selection_question}"
        else:
            answer = ""

        rag_results = None
        if arguments.question:
            if HumanAnswerServices.check_additional_information(self.tenant_id,
                                                                START_SELECTION_QUESTION,
                                                                arguments.question,
                                                                arguments.language):
                rag_results = self.execute_rag(arguments, formatted_context, citations)
                self.flow.state.rag_output = rag_results.rag_output
                answer = f"{answer}\n{rag_results.answer}"

            if HumanAnswerServices.check_affirmative_answer(self.tenant_id,
                                                            START_SELECTION_QUESTION,
                                                            arguments.question,
                                                            arguments.language):
                return self.execute_start_selection_procedure_state(arguments, formatted_context, citations, answer)

        self.flow.state.answer = answer
        self.flow.state.phase = "rag"
        self.flow.state.form_request = None

        results = SelectionResult.create_for_type(self.type, self.type_version,)
        return results

    def execute_rag(self, arguments: SpecialistArguments, formatted_context, citations) -> RAGOutput:
        self.log_tuning("RAG Specialist execution started", {})

        insufficient_info_message = TranslationServices.translate(self.tenant_id,
                                                                  INSUFFICIENT_INFORMATION_MESSAGE,
                                                                  arguments.language)
        if formatted_context:
            flow_inputs = {
                "language": arguments.language,
                "question": arguments.question,
                "context": formatted_context,
                "history": self.formatted_history,
                "name": self.specialist.configuration.get('name', ''),
            }
            rag_output = self.flow.kickoff(inputs=flow_inputs)
            if rag_output.rag_output.insufficient_info:
                rag_output.rag_output.answer = insufficient_info_message
        else:
            rag_output = RAGOutput(answer=insufficient_info_message,
                                   insufficient_info=True)

        self.log_tuning(f"RAG Specialist execution ended", {"Results": rag_output.model_dump()})

        return rag_output

    def _check_and_execute_rag(self, arguments: SpecialistArguments, formatted_context, citations) -> RAGOutput:
        if HumanAnswerServices.check_additional_information(self.tenant_id,
                                                            START_SELECTION_QUESTION,
                                                            arguments.question,
                                                            arguments.language):
            results = self.execute_rag(arguments, formatted_context, citations)
            return results
        else:
            return None

    def _get_ko_questions(self) -> KOQuestions:
        ko_questions_asset = db.session.query(EveAIAsset).filter(
            EveAIAsset.type == "TRAICIE_KO_CRITERIA_QUESTIONS",
            EveAIAsset.type_version == "1.0.0",
            EveAIAsset.configuration.is_not(None),
            EveAIAsset.configuration.has_key('specialist_id'),
            EveAIAsset.configuration['specialist_id'].astext.cast(db.Integer) == self.specialist_id
        ).first()

        if not ko_questions_asset:
            raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id,
                                                "No KO criteria questions found")

        ko_questions_data = minio_client.download_asset_file(self.tenant_id, ko_questions_asset.bucket_name,
                                                             ko_questions_asset.object_name)
        ko_questions = KOQuestions.from_json(ko_questions_data)
        current_app.logger.debug(f"KO Questions: {ko_questions}")

        return ko_questions


class SelectionKOCriteriumScore(BaseModel):
@@ -285,12 +451,6 @@ class SelectionKOCriteriumScore(BaseModel):
    score: Optional[int] = Field(None, alias="score")


class SelectionCompetencyScore(BaseModel):
    competency: Optional[str] = Field(None, alias="competency")
    answer: Optional[str] = Field(None, alias="answer")
    score: Optional[int] = Field(None, alias="score")


class PersonalContactData(BaseModel):
    name: str = Field(..., description="Your name", alias="name")
    email: EmailStr = Field(..., description="Your email address", alias="email")
@@ -302,34 +462,51 @@ class PersonalContactData(BaseModel):
    consent: bool = Field(..., description="Consent", alias="consent")

class SelectionResult(SpecialistResult):
    ko_criteria_questions: Optional[List[ListItem]] = Field(None, alias="ko_criteria_questions")
    ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores")
    competency_questions: Optional[List[ListItem]] = Field(None, alias="competency_questions")
    competency_scores: Optional[List[SelectionCompetencyScore]] = Field(None, alias="competency_scores")
    personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data")
class SelectionInput(BaseModel):
    # RAG elements
    language: Optional[str] = Field(None, alias="language")
    question: Optional[str] = Field(None, alias="query")
    context: Optional[str] = Field(None, alias="context")
    citations: Optional[List[int]] = Field(None, alias="citations")
    history: Optional[str] = Field(None, alias="history")
    name: Optional[str] = Field(None, alias="name")
    # Selection elements
    region: str = Field(..., alias="region")
    working_schedule: Optional[str] = Field(..., alias="working_schedule")
    start_date: Optional[date] = Field(None, alias="vacancy_text")
    interaction_mode: Optional[str] = Field(None, alias="interaction_mode")
    tone_of_voice: Optional[str] = Field(None, alias="tone_of_voice")
    tone_of_voice_context: Optional[str] = Field(None, alias="tone_of_voice_context")
    language_level: Optional[str] = Field(None, alias="language_level")
    language_level_context: Optional[str] = Field(None, alias="language_level_context")
    ko_criteria: Optional[List[Dict[str, str]]] = Field(None, alias="ko_criteria")
    field_values: Optional[Dict[str, Any]] = Field(None, alias="field_values")

class SelectionFlowState(EveAIFlowState):
    """Flow state for Traicie Role Definition specialist that automatically updates from task outputs"""
    """Flow state for RAG specialist that automatically updates from task outputs"""
    input: Optional[SelectionInput] = None
    ko_criteria_questions: Optional[List[KOQuestion]] = Field(None, alias="ko_criteria_questions")
    ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores")
    competency_questions: Optional[List[ListItem]] = Field(None, alias="competency_questions")
    competency_scores: Optional[List[SelectionCompetencyScore]] = Field(None, alias="competency_scores")
    rag_output: Optional[RAGOutput] = None
    ko_criteria_answers: Optional[Dict[str, str]] = None
    personal_contact_data: Optional[PersonalContactData] = None
    contact_time: Optional[str] = None


class SelectionResult(SpecialistResult):
    rag_output: Optional[RAGOutput] = Field(None, alias="rag_output")
    ko_criteria_answers: Optional[Dict[str, str]] = Field(None, alias="ko_criteria_answers")
    personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data")
    phase: Optional[str] = Field(None, alias="phase")
    interaction_mode: Optional[str] = Field(None, alias="mode")
    contact_time: Optional[str] = None

class SelectionFlow(EveAICrewAIFlow[SelectionFlowState]):
    def __init__(self,
                 specialist_executor: CrewAIBaseSpecialistExecutor,
                 ko_def_crew: EveAICrewAICrew,
                 rag_crew: EveAICrewAICrew,
                 **kwargs):
        super().__init__(specialist_executor, "Traicie Role Definition Specialist Flow", **kwargs)
        super().__init__(specialist_executor, "Selection Specialist Flow", **kwargs)
        self.specialist_executor = specialist_executor
        self.ko_def_crew = ko_def_crew
        self.rag_crew = rag_crew
        self.exception_raised = False

    @start()
@@ -337,34 +514,24 @@ class SelectionFlow(EveAICrewAIFlow[SelectionFlowState]):
        return ""

    @listen(process_inputs)
    async def execute_ko_def_definition(self):
    async def execute_rag(self):
        inputs = self.state.input.model_dump()
        try:
            current_app.logger.debug("execute_ko_interview_definition")
            crew_output = await self.ko_def_crew.kickoff_async(inputs=inputs)
            # Unfortunately, crew_output will only contain the output of the latest task.
            # As we will only take into account the flow state, we need to ensure both competencies and criteria
            # are copied to the flow state.
            update = {}
            for task in self.ko_def_crew.tasks:
                current_app.logger.debug(f"Task {task.name} output:\n{task.output}")
                if task.name == "traicie_ko_criteria_interview_definition_task":
                    # update["competencies"] = task.output.pydantic.competencies
                    self.state.ko_criteria_questions = task.output.pydantic.ko_questions
            # crew_output.pydantic = crew_output.pydantic.model_copy(update=update)
            self.state.phase = "personal_contact_data"
            current_app.logger.debug(f"State after execute_ko_def_definition: {self.state}")
            current_app.logger.debug(f"State dump after execute_ko_def_definition: {self.state.model_dump()}")
            crew_output = await self.rag_crew.kickoff_async(inputs=inputs)
            self.specialist_executor.log_tuning("RAG Crew Output", crew_output.model_dump())
            output_pydantic = crew_output.pydantic
            if not output_pydantic:
                raw_json = json.loads(crew_output.raw)
                output_pydantic = RAGOutput.model_validate(raw_json)
            self.state.rag_output = output_pydantic
            return crew_output
        except Exception as e:
            current_app.logger.error(f"CREW execute_ko_def Kickoff Error: {str(e)}")
            current_app.logger.error(f"CREW rag_crew Kickoff Error: {str(e)}")
            self.exception_raised = True
            raise e

    async def kickoff_async(self, inputs=None):
        current_app.logger.debug(f"Async kickoff {self.name}")
        current_app.logger.debug(f"Inputs: {inputs}")
        self.state.input = SelectionInput.model_validate(inputs)
        current_app.logger.debug(f"State: {self.state}")
        result = await super().kickoff_async(inputs)
        return self.state