- Full Traicie Selection Specialist Flow implemented

- Added Specialist basics for handling phases and automatically transferring data between state and output
- Added QR-code generation for Magic Links
Josako
2025-06-23 11:46:56 +02:00
parent 5b2c04501c
commit 7b87880045
10 changed files with 272 additions and 33 deletions

View File

@@ -248,3 +248,14 @@ class EveAIPendingLicensePeriod(EveAIException):
message = f"Basic Fee Payment has not been received yet. Please ensure payment has been made, and please wait for payment to be processed." message = f"Basic Fee Payment has not been received yet. Please ensure payment has been made, and please wait for payment to be processed."
super().__init__(message, status_code, payload) super().__init__(message, status_code, payload)
class EveAISpecialistExecutionError(EveAIException):
"""Raised when an error occurs during specialist execution"""
def __init__(self, tenant_id, specialist_id, session_id, details, status_code=400, payload=None):
message = (f"Error during specialist {specialist_id} execution \n"
f"with Session ID {session_id} \n"
f"for Tenant {tenant_id}. \n"
f"Details: {details} \n"
f"The System Administrator has been notified. Please try again later.")
super().__init__(message, status_code, payload)

View File

@@ -16,10 +16,10 @@ backstory: >
AI-driven sourcing. You're more than a recruiter—you're a trusted advisor, a brand ambassador, and a connector of
people and purpose.
{custom_backstory}
full_model_name: "mistral.mistral-medium-latest" full_model_name: "mistral.magistral-medium-latest"
temperature: 0.3 temperature: 0.3
metadata: metadata:
author: "Josako" author: "Josako"
date_added: "2025-06-18" date_added: "2025-06-18"
description: "HR BP Agent." description: "Traicie Recruiter Agent"
changes: "Initial version" changes: "Initial version"

View File

@@ -275,6 +275,7 @@ class DevConfig(Config):
# Define the nginx prefix used for the specific apps
EVEAI_APP_LOCATION_PREFIX = '/admin'
EVEAI_CHAT_LOCATION_PREFIX = '/chat'
CHAT_CLIENT_PREFIX = 'chat-client/chat/'
# file upload settings
# UPLOAD_FOLDER = '/app/tenant_files'

View File

@@ -0,0 +1,37 @@
version: "1.0.0"
name: "KO Criteria Interview Definition"
task_description: >
In the context of a vacancy at your company {tenant_name}, you are provided with a set of competencies
(both title and description). The competencies are enclosed between triple backticks and should be
treated as knock-out criteria.
For each of the knock-out criteria, you need to define
- A short (one sentence), closed-ended question (Yes / No) to ask the recruitment candidate. Use your experience to ask a question
that enables us to verify compliance with the criterion.
- A set of two short answers (one short sentence each) to that question (a positive answer and a negative answer), from the
candidate's perspective.
The positive answer will result in a positive evaluation of the criterion, the negative answer in a negative evaluation
of the criterion. Try to avoid using just Yes / No as the positive and negative answers.
Apply the following tone of voice in both questions and answers: {tone_of_voice}, i.e. {tone_of_voice_context}
Apply the following language level in both questions and answers: {language_level}, i.e. {language_level_context}
Use {language} as language for both questions and answers.
```{ko_criteria}```
{custom_description}
expected_output: >
For each of the KO criteria, you provide:
- the exact title as specified in the original language
- the question in {language}
- a positive answer, resulting in a positive evaluation of the criterion. In {language}.
- a negative answer, resulting in a negative evaluation of the criterion. In {language}.
{custom_expected_output}
metadata:
author: "Josako"
date_added: "2025-06-20"
description: "A Task to define interview Q&A from given KO Criteria"
changes: "Improvement to ensure closed-ended questions and short descriptions"

View File

@@ -9,11 +9,30 @@
{% block content %}
<form method="post">
{{ form.hidden_tag() }}
-{% set disabled_fields = ['magic_link_code'] %}
+{% set disabled_fields = ['magic_link_code', 'chat_client_url', 'qr_code_url'] %}
{% set exclude_fields = [] %}
<!-- Render Static Fields -->
{% for field in form.get_static_fields() %}
-{{ render_field(field, disabled_fields, exclude_fields) }}
+{% if field.name == 'qr_code_url' and field.data %}
<div class="form-group">
<label for="{{ field.id }}">{{ field.label.text }}</label>
<div style="max-width: 200px;">
<img src="{{ field.data }}" alt="QR Code" class="img-fluid">
</div>
<input type="hidden" name="{{ field.name }}" value="{{ field.data|e }}">
</div>
{% elif field.name == 'chat_client_url' %}
<div class="form-group">
<label for="{{ field.id }}" class="form-label">{{ field.label.text }}</label>
<div class="input-group">
<input type="text" class="form-control" value="{{ field.data }}" id="{{ field.id }}" readonly>
<a href="{{ field.data }}" class="btn btn-primary" target="_blank">Open link</a>
</div>
<input type="hidden" name="{{ field.name }}" value="{{ field.data|e }}">
</div>
{% else %}
{{ render_field(field, disabled_fields, exclude_fields) }}
{% endif %}
{% endfor %}
<!-- Render Dynamic Fields -->
{% for collection_name, fields in form.get_dynamic_fields().items() %}

View File

@@ -162,6 +162,8 @@ class EditSpecialistMagicLinkForm(DynamicFormBase):
render_kw={'readonly': True})
specialist_id = IntegerField('Specialist', validators=[DataRequired()], render_kw={'readonly': True})
specialist_name = StringField('Specialist Name', validators=[DataRequired()], render_kw={'readonly': True})
chat_client_url = StringField('Chat Client URL', validators=[Optional()], render_kw={'readonly': True})
qr_code_url = StringField('QR Code', validators=[Optional()], render_kw={'readonly': True})
tenant_make_id = SelectField('Tenant Make', validators=[Optional()], coerce=int)
valid_from = DateField('Valid From', id='form-control datepicker', validators=[Optional()])
valid_to = DateField('Valid To', id='form-control datepicker', validators=[Optional()])

View File

@@ -748,6 +748,56 @@ def edit_specialist_magic_link(specialist_magic_link_id):
else:
form.tenant_make_id.data = specialist_ml.tenant_make_id
# Set the chat client URL
tenant_id = session.get('tenant').get('id')
chat_client_prefix = current_app.config.get('CHAT_CLIENT_PREFIX', 'chat_client/chat/')
base_url = request.url_root
magic_link_code = specialist_ml.magic_link_code
# Parse the URL to preserve the port information if it differs from the default
url_parts = request.url.split('/')
host_port = url_parts[2]  # This contains both hostname and port, if present
# Generate the full URL for chat client with magic link code
chat_client_url = f"{request.scheme}://{host_port}/{chat_client_prefix}{magic_link_code}"
form.chat_client_url.data = chat_client_url
# Generate QR code as data URI for direct embedding in HTML
try:
import qrcode
import io
import base64
# Generate QR code as PNG for better compatibility
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=10,
border=4
)
qr.add_data(chat_client_url)
qr.make(fit=True)
# Generate PNG image in memory
img = qr.make_image(fill_color="black", back_color="white")
buffer = io.BytesIO()
img.save(buffer, format='PNG')
img_data = buffer.getvalue()
# Create data URI for direct embedding in HTML
img_base64 = base64.b64encode(img_data).decode('utf-8')
data_uri = f"data:image/png;base64,{img_base64}"
# Store the data URI in the form data
form.qr_code_url.data = data_uri
current_app.logger.debug(f"QR code generated successfully for {magic_link_code}")
current_app.logger.debug(f"QR code data URI starts with: {data_uri[:50]}...")
except Exception as e:
current_app.logger.error(f"Failed to generate QR code: {str(e)}")
form.qr_code_url.data = "Error generating QR code"
if form.validate_on_submit():
# Update the basic fields
form.populate_obj(specialist_ml)
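Outside the request flow, the data-URI approach above can be sanity-checked with a small standalone script. This assumes the qrcode[pil] dependency added in requirements.txt; the URL and magic-link code are purely illustrative:

import base64
import io

import qrcode

# Build a QR code for an illustrative magic-link URL and wrap it in a data URI,
# mirroring what the view above does with QRCode()/make_image().
img = qrcode.make("https://example.com/chat-client/chat/DEMO123")
buffer = io.BytesIO()
img.save(buffer, format="PNG")
data_uri = "data:image/png;base64," + base64.b64encode(buffer.getvalue()).decode("utf-8")

# The payload must decode back to a valid PNG (signature \x89PNG\r\n\x1a\n)
# for the <img src="{{ field.data }}"> embed in the template to render.
assert base64.b64decode(data_uri.split(",", 1)[1])[:8] == b"\x89PNG\r\n\x1a\n"
print(data_uri[:50] + "...")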

View File

@@ -50,15 +50,20 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
self._task_pydantic_outputs: Dict[str, Type[BaseModel]] = {}
self._task_state_names: Dict[str, str] = {}
-# Processed configurations
+# State-Result relations (for adding / restoring information to / from history)
self._state_result_relations: Dict[str, str] = {}
# Process configurations
self._config = cache_manager.crewai_processed_config_cache.get_specialist_config(tenant_id, specialist_id)
self._config_task_agents()
self._config_pydantic_outputs()
self._instantiate_crew_assets()
self._instantiate_specialist()
self._config_state_result_relations()
# Retrieve history
self._cached_session = cache_manager.chat_session_cache.get_cached_session(self.session_id)
self._restore_state_from_history()
# Format history for the prompt
self._formatted_history = self._generate_formatted_history()
@@ -106,6 +111,19 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
"""Configure the task pydantic outputs by adding task-output combinations. Use _add_pydantic_output()""" """Configure the task pydantic outputs by adding task-output combinations. Use _add_pydantic_output()"""
raise NotImplementedError raise NotImplementedError
def _add_state_result_relation(self, state_name: str, result_name: str = None):
"""Add a state-result relation to the specialist. This is used to add information to the history
If result_name is None, the state name is used as the result name. (default behavior)
"""
if not result_name:
result_name = state_name
self._state_result_relations[state_name] = result_name
@abstractmethod
def _config_state_result_relations(self):
"""Configure the state-result relations by adding state-result combinations. Use _add_state_result_relation()"""
raise NotImplementedError
@property
def task_pydantic_outputs(self):
return self._task_pydantic_outputs
@@ -330,6 +348,27 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
return formatted_context, citations
def _update_specialist_results(self, specialist_results: SpecialistResult) -> SpecialistResult:
"""Update the specialist results with the latest state information"""
update_data = {}
state_dict = self.flow.state.model_dump()
for state_name, result_name in self._state_result_relations.items():
if state_name in state_dict and state_dict[state_name] is not None:
update_data[result_name] = state_dict[state_name]
return specialist_results.model_copy(update=update_data)
def _restore_state_from_history(self):
"""Restore the state from the history"""
if not self._cached_session.interactions:
return
last_interaction = self._cached_session.interactions[-1]
if not last_interaction.specialist_results:
return
for state_name, result_name in self._state_result_relations.items():
if result_name in last_interaction.specialist_results:
setattr(self.flow.state, state_name, last_interaction.specialist_results[result_name])
@abstractmethod
def execute(self, arguments: SpecialistArguments, formatted_context: str, citations: List[int]) -> SpecialistResult:
raise NotImplementedError
@@ -356,8 +395,10 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
"detailed_query": detailed_query, "detailed_query": detailed_query,
"citations": citations, "citations": citations,
} }
-final_result = result.model_copy(update=modified_result)
+intermediate_result = result.model_copy(update=modified_result)
else:
-final_result = self.execute(arguments, "", [])
+intermediate_result = self.execute(arguments, "", [])
final_result = self._update_specialist_results(intermediate_result)
return final_result
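To illustrate the state-to-result hand-off that _update_specialist_results() and _restore_state_from_history() implement above, here is a self-contained toy using Pydantic v2. The model and field names are illustrative stand-ins, not the project's actual flow state or SpecialistResult classes:

from typing import Optional
from pydantic import BaseModel

class ToyFlowState(BaseModel):  # stands in for the CrewAI flow state
    ko_criteria_questions: Optional[list] = None
    personal_contact_data: Optional[dict] = None

class ToySpecialistResult(BaseModel):  # stands in for SpecialistResult
    ko_criteria_questions: Optional[list] = None
    personal_contact_data: Optional[dict] = None

# state name -> result name, as registered via _add_state_result_relation()
relations = {"ko_criteria_questions": "ko_criteria_questions"}

state = ToyFlowState(ko_criteria_questions=[{"title": "Driving licence", "answer_positive": "Yes, I have one"}])
result = ToySpecialistResult()

# Mirror of _update_specialist_results(): copy non-empty state fields onto the result
state_dict = state.model_dump()
update = {r: state_dict[s] for s, r in relations.items() if state_dict.get(s) is not None}
result = result.model_copy(update=update)

# Mirror of _restore_state_from_history(): push stored results back onto a fresh state
stored = result.model_dump()
restored = ToyFlowState()
for s, r in relations.items():
    if stored.get(r) is not None:
        setattr(restored, s, stored[r])

print(result.model_dump())
print(restored.model_dump())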

View File

@@ -22,6 +22,7 @@ from common.services.interaction.specialist_services import SpecialistServices
from common.extensions import cache_manager
from eveai_chat_workers.definitions.language_level.language_level_v1_0 import LANGUAGE_LEVEL
from eveai_chat_workers.definitions.tone_of_voice.tone_of_voice_v1_0 import TONE_OF_VOICE
from common.utils.eveai_exceptions import EveAISpecialistExecutionError
class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
@@ -53,6 +54,13 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
def _config_pydantic_outputs(self):
self._add_pydantic_output("traicie_ko_criteria_interview_definition_task", KOQuestions, "ko_questions")
def _config_state_result_relations(self):
self._add_state_result_relation("ko_criteria_questions")
self._add_state_result_relation("ko_criteria_scores")
self._add_state_result_relation("competency_questions")
self._add_state_result_relation("competency_scores")
self._add_state_result_relation("personal_contact_data")
def _instantiate_specialist(self):
verbose = self.tuning
@@ -89,16 +97,14 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
match specialist_phase:
case "initial":
results = self.execute_initial_state(arguments, formatted_context, citations)
-case "ko_questions":
-contact_form = cache_manager.specialist_forms_config_cache.get_config("PERSONAL_CONTACT_FORM", "1.0")
-results = SpecialistResult.create_for_type(self.type, self.type_version,
-answer=f"We hebben de antwoorden op de KO criteria verwerkt. Je bent een geschikte kandidaat. Kan je je contactegevens doorgeven?",
-form_request=contact_form,
-phase="personal_contact_data")
+case "ko_question_evaluation":
+results = self.execute_ko_question_evaluation(arguments, formatted_context, citations)
case "personal_contact_data":
-results = SpecialistResult.create_for_type(self.type, self.type_version,
-answer=f"We hebben de contactgegevens verwerkt. We nemen zo snel mogelijk contact met je op.",
-phase="candidate_selected")
+results = self.execute_personal_contact_data(arguments, formatted_context, citations)
+case "no_valid_candidate":
+results = self.execute_no_valid_candidate(arguments, formatted_context, citations)
+case "candidate_selected":
+results = self.execute_candidate_selected(arguments, formatted_context, citations)
self.log_tuning(f"Traicie Selection Specialist execution ended", {"Results": results.model_dump() if results else "No info"})
@@ -108,18 +114,30 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
def execute_initial_state(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist initial_state_execution started", {})
-knockout_competencies = [
-{
-"title": c["title"],
-"description": c["description"]
-}
-for c in self.specialist.configuration.get("competencies", [])
-if c.get("is_knockout") is True
-]
-# Convert the TONE_OF_VOICE and LANGUAGE_LEVEL lists to strings usable by the LLM
-tone_of_voice_str = "\n\n".join([f"Name: {item['name']}\nDescription: {item['description']}\nWhen to use: {item['when_to_use']}" for item in TONE_OF_VOICE])
-language_level_str = "\n\n".join([f"Name: {item['name']}\nDescription: {item['description']}\nCEFR level: {item['cefr_level']}\nIdeal Target Audience: {item['ideal_audience']}" for item in LANGUAGE_LEVEL])
+current_app.logger.debug(f"Specialist Competencies:\n{self.specialist.configuration.get('competencies', [])}")
+ko_competencies = []
+for competency in self.specialist.configuration.get("competencies", []):
+if competency["is_knockout"] is True and competency["assess"] is True:
+current_app.logger.debug(f"Assessable Knockout competency: {competency}")
+ko_competencies.append({"title": competency["title"], "description": competency["description"]})
+tone_of_voice = self.specialist.configuration.get('tone_of_voice', 'Professional & Neutral')
+selected_tone_of_voice = next(
+(item for item in TONE_OF_VOICE if item["name"] == tone_of_voice),
+None  # fallback if not found
+)
+current_app.logger.debug(f"Selected tone of voice: {selected_tone_of_voice}")
+tone_of_voice_context = f"{selected_tone_of_voice['description']}"
+language_level = self.specialist.configuration.get('language_level', 'Standard')
+selected_language_level = next(
+(item for item in LANGUAGE_LEVEL if item["name"] == language_level),
+None
+)
+current_app.logger.debug(f"Selected language level: {selected_language_level}")
+language_level_context = (f"{selected_language_level['description']}, "
+f"corresponding to CEFR level {selected_language_level['cefr_level']}")
flow_inputs = {
"region": arguments.region,
@@ -127,11 +145,11 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
"start_date": arguments.start_date, "start_date": arguments.start_date,
"language": arguments.language, "language": arguments.language,
"interaction_mode": arguments.interaction_mode, "interaction_mode": arguments.interaction_mode,
'tone_of_voice': self.specialist.configuration.get('tone_of_voice', 'Professional & Neutral'), 'tone_of_voice': tone_of_voice,
'tone_of_voice_context': tone_of_voice_str, 'tone_of_voice_context': tone_of_voice_context,
'language_level': self.specialist.configuration.get('language_level', 'Standard'), 'language_level': language_level,
'language_level_context': language_level_str, 'language_level_context': language_level_context,
'ko_criteria': knockout_competencies, 'ko_criteria': ko_competencies,
} }
flow_results = self.flow.kickoff(inputs=flow_inputs)
@@ -162,10 +180,69 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
results = SpecialistResult.create_for_type(self.type, self.type_version,
answer=f"We starten met een aantal KO Criteria vragen",
form_request=ko_form,
-phase="ko_questions")
+phase="ko_question_evaluation")
return results
def execute_ko_question_evaluation(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist ko_question_evaluation started", {})
# Check if the form has been returned (it should)
if not arguments.form_values:
raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id, "No form values returned")
current_app.logger.debug(f"Form values: {arguments.form_values}")
# Load the previous KO Questions
previous_ko_questions = self.flow.state.ko_criteria_questions
current_app.logger.debug(f"Previous KO Questions: {previous_ko_questions}")
# Evaluate KO Criteria
evaluation = "positive"
for criterium, answer in arguments.form_values.items():
for qa in previous_ko_questions:
if qa.get("title") == criterium:
if qa.get("answer_positive") != answer:
evaluation = "negative"
break
if evaluation == "negative":
break
if evaluation == "negative":
results = SpecialistResult.create_for_type(self.type, self.type_version,
answer=f"We hebben de antwoorden op de KO criteria verwerkt. Je voldoet jammer genoeg niet aan de minimale vereisten voor deze job.",
form_request=None,
phase="no_valid_candidate")
else:
# All answers to the KO questions are positive; ask for the candidate's contact details
contact_form = cache_manager.specialist_forms_config_cache.get_config("PERSONAL_CONTACT_FORM", "1.0")
results = SpecialistResult.create_for_type(self.type, self.type_version,
answer=f"We hebben de antwoorden op de KO criteria verwerkt. Je bent een geschikte kandidaat. Kan je je contactgegevens doorgeven?",
form_request=contact_form,
phase="personal_contact_data")
return results
def execute_personal_contact_data(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist personal_contact_data started", {})
results = SpecialistResult.create_for_type(self.type, self.type_version,
answer=f"We hebben de contactgegevens verwerkt. We nemen zo snel mogelijk contact met je op.",
phase="candidate_selected")
return results
def execute_no_valid_candidate(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist no_valid_candidate started", {})
results = SpecialistResult.create_for_type(self.type, self.type_version,
answer=f"Je voldoet jammer genoeg niet aan de minimale vereisten voor deze job. Maar solliciteer gerust voor één van onze andere jobs.",
phase="no_valid_candidate")
return results
def execute_candidate_selected(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist candidate_selected started", {})
results = SpecialistResult.create_for_type(self.type, self.type_version,
answer=f"We hebben je contactgegegevens verwerkt. We nemen zo snel mogelijk contact met je op.",
phase="candidate_selected")
return results
class SelectionInput(BaseModel):
region: str = Field(..., alias="region")
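The knock-out evaluation in execute_ko_question_evaluation() above marks a candidate negative as soon as one submitted answer differs from the stored answer_positive. A self-contained toy run of the same loop (titles and answers are made up; the data shapes are assumptions based on the interview-definition task and form handling above) behaves like this:

# Toy illustration of the knock-out evaluation loop above.
previous_ko_questions = [
    {"title": "Forklift licence", "answer_positive": "Yes, I hold a valid licence", "answer_negative": "No, I do not"},
    {"title": "Shift work", "answer_positive": "Shift work is fine for me", "answer_negative": "I cannot work shifts"},
]
form_values = {"Forklift licence": "Yes, I hold a valid licence", "Shift work": "I cannot work shifts"}

evaluation = "positive"
for criterium, answer in form_values.items():
    for qa in previous_ko_questions:
        if qa.get("title") == criterium:
            if qa.get("answer_positive") != answer:
                evaluation = "negative"
                break
    if evaluation == "negative":
        break

print(evaluation)  # -> "negative": the "Shift work" answer does not match answer_positive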

View File

@@ -94,3 +94,4 @@ scaleway~=2.9.0
html2text~=2025.4.15
markdown~=3.8
python-json-logger~=2.0.7
qrcode[pil]==8.2