diff --git a/common/utils/eveai_exceptions.py b/common/utils/eveai_exceptions.py index 0c912c3..ff6793b 100644 --- a/common/utils/eveai_exceptions.py +++ b/common/utils/eveai_exceptions.py @@ -248,3 +248,14 @@ class EveAIPendingLicensePeriod(EveAIException): message = f"Basic Fee Payment has not been received yet. Please ensure payment has been made, and please wait for payment to be processed." super().__init__(message, status_code, payload) + +class EveAISpecialistExecutionError(EveAIException): + """Raised when an error occurs during specialist execution""" + + def __init__(self, tenant_id, specialist_id, session_id, details, status_code=400, payload=None): + message = (f"Error during specialist {specialist_id} execution \n" + f"with Session ID {session_id} \n" + f"for Tenant {tenant_id}. \n" + f"Details: {details} \n" + f"The System Administrator has been notified. Please try again later.") + super().__init__(message, status_code, payload) diff --git a/config/agents/traicie/TRAICIE_RECRUITER_AGENT/1.0.0.yaml b/config/agents/traicie/TRAICIE_RECRUITER_AGENT/1.0.0.yaml index 285b840..752a57b 100644 --- a/config/agents/traicie/TRAICIE_RECRUITER_AGENT/1.0.0.yaml +++ b/config/agents/traicie/TRAICIE_RECRUITER_AGENT/1.0.0.yaml @@ -16,10 +16,10 @@ backstory: > AI-driven sourcing. You’re more than a recruiter—you’re a trusted advisor, a brand ambassador, and a connector of people and purpose. {custom_backstory} -full_model_name: "mistral.mistral-medium-latest" +full_model_name: "mistral.magistral-medium-latest" temperature: 0.3 metadata: author: "Josako" date_added: "2025-06-18" - description: "HR BP Agent." 
+ description: "Traicie Recruiter Agent" changes: "Initial version" diff --git a/config/config.py b/config/config.py index 0c59353..abd86a7 100644 --- a/config/config.py +++ b/config/config.py @@ -275,6 +275,7 @@ class DevConfig(Config): # Define the nginx prefix used for the specific apps EVEAI_APP_LOCATION_PREFIX = '/admin' EVEAI_CHAT_LOCATION_PREFIX = '/chat' + CHAT_CLIENT_PREFIX = 'chat-client/chat/' # file upload settings # UPLOAD_FOLDER = '/app/tenant_files' diff --git a/config/tasks/traicie/TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK/1.0.1.yaml b/config/tasks/traicie/TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK/1.0.1.yaml new file mode 100644 index 0000000..e0dd724 --- /dev/null +++ b/config/tasks/traicie/TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK/1.0.1.yaml @@ -0,0 +1,37 @@ +version: "1.0.1" +name: "KO Criteria Interview Definition" +task_description: > + In context of a vacancy in your company {tenant_name}, you are provided with a set of competencies + (both description and title). The competencies are in between triple backquotes. The competencies provided should be + handled as knock-out criteria. + For each of the knock-out criteria, you need to define + + - A short (1 sentence), closed-ended question (Yes / No) to ask the recruitment candidate. Use your experience to ask a question that + enables us to verify compliancy to the criterium. + - A set of 2 short answers (1 small sentence each) to that question (positive answer / negative answer), from the + candidates perspective. + The positive answer will result in a positive evaluation of the criterium, the negative answer in a negative evaluation + of the criterium. Try to avoid just using Yes / No as positive and negative answers. + + Apply the following tone of voice in both questions and answers: {tone_of_voice}, i.e. {tone_of_voice_context} + + Apply the following language level in both questions and answers: {language_level}, i.e. 
{language_level_context} + + Use {language} as language for both questions and answers. + + ```{ko_criteria}``` + + {custom_description} + +expected_output: > + For each of the ko criteria, you provide: + - the exact title as specified in the original language + - the question in {language} + - a positive answer, resulting in a positive evaluation of the criterium. In {language}. + - a negative answer, resulting in a negative evaluation of the criterium. In {language}. + {custom_expected_output} +metadata: + author: "Josako" + date_added: "2025-06-20" + description: "A Task to define interview Q&A from given KO Criteria" + changes: "Improvement to ensure closed-ended questions and short descriptions" diff --git a/eveai_app/templates/interaction/edit_specialist_magic_link.html b/eveai_app/templates/interaction/edit_specialist_magic_link.html index afc2195..0daa518 100644 --- a/eveai_app/templates/interaction/edit_specialist_magic_link.html +++ b/eveai_app/templates/interaction/edit_specialist_magic_link.html @@ -9,11 +9,30 @@ {% block content %}
{{ form.hidden_tag() }} - {% set disabled_fields = ['magic_link_code'] %} + {% set disabled_fields = ['magic_link_code', 'chat_client_url', 'qr_code_url'] %} {% set exclude_fields = [] %} {% for field in form.get_static_fields() %} - {{ render_field(field, disabled_fields, exclude_fields) }} + {% if field.name == 'qr_code_url' and field.data %} +
+ +
+ QR Code +
+ +
+ {% elif field.name == 'chat_client_url' %} +
+ +
+ + Open link +
+ +
{% else %} + {{ render_field(field, disabled_fields, exclude_fields) }} + {% endif %} {% endfor %} {% for collection_name, fields in form.get_dynamic_fields().items() %} diff --git a/eveai_app/views/interaction_forms.py b/eveai_app/views/interaction_forms.py index 67193d4..d24a661 100644 --- a/eveai_app/views/interaction_forms.py +++ b/eveai_app/views/interaction_forms.py @@ -162,6 +162,8 @@ class EditSpecialistMagicLinkForm(DynamicFormBase): render_kw={'readonly': True}) specialist_id = IntegerField('Specialist', validators=[DataRequired()], render_kw={'readonly': True}) specialist_name = StringField('Specialist Name', validators=[DataRequired()], render_kw={'readonly': True}) + chat_client_url = StringField('Chat Client URL', validators=[Optional()], render_kw={'readonly': True}) + qr_code_url = StringField('QR Code', validators=[Optional()], render_kw={'readonly': True}) tenant_make_id = SelectField('Tenant Make', validators=[Optional()], coerce=int) valid_from = DateField('Valid From', id='form-control datepicker', validators=[Optional()]) valid_to = DateField('Valid To', id='form-control datepicker', validators=[Optional()]) diff --git a/eveai_app/views/interaction_views.py b/eveai_app/views/interaction_views.py index 3d53b19..65fc846 100644 --- a/eveai_app/views/interaction_views.py +++ b/eveai_app/views/interaction_views.py @@ -748,6 +748,56 @@ def edit_specialist_magic_link(specialist_magic_link_id): else: form.tenant_make_id.data = specialist_ml.tenant_make_id + + # Set the chat client URL + tenant_id = session.get('tenant').get('id') + chat_client_prefix = current_app.config.get('CHAT_CLIENT_PREFIX', 'chat-client/chat/') + base_url = request.url_root + magic_link_code = specialist_ml.magic_link_code + + # Parse the URL to preserve port information when it differs from the default + url_parts = request.url.split('/') + host_port = url_parts[2] # This contains both hostname and port, if present + + # Generate the full URL for chat client with magic 
link code + chat_client_url = f"{request.scheme}://{host_port}/{chat_client_prefix}{magic_link_code}" + form.chat_client_url.data = chat_client_url + + # Generate QR code as data URI for direct embedding in HTML + try: + import qrcode + import io + import base64 + + # Generate QR code as PNG for better compatibility + qr = qrcode.QRCode( + version=1, + error_correction=qrcode.constants.ERROR_CORRECT_L, + box_size=10, + border=4 + ) + qr.add_data(chat_client_url) + qr.make(fit=True) + + # Generate PNG image in memory + img = qr.make_image(fill_color="black", back_color="white") + buffer = io.BytesIO() + img.save(buffer, format='PNG') + img_data = buffer.getvalue() + + # Create data URI for direct embedding in HTML + img_base64 = base64.b64encode(img_data).decode('utf-8') + data_uri = f"data:image/png;base64,{img_base64}" + + # Store the data URI in the form data + form.qr_code_url.data = data_uri + + current_app.logger.debug(f"QR code generated successfully for {magic_link_code}") + current_app.logger.debug(f"QR code data URI starts with: {data_uri[:50]}...") + + except Exception as e: + current_app.logger.error(f"Failed to generate QR code: {str(e)}") + form.qr_code_url.data = "Error generating QR code" + if form.validate_on_submit(): # Update the basic fields form.populate_obj(specialist_ml) diff --git a/eveai_chat_workers/specialists/crewai_base_specialist.py b/eveai_chat_workers/specialists/crewai_base_specialist.py index ab298ba..26ab800 100644 --- a/eveai_chat_workers/specialists/crewai_base_specialist.py +++ b/eveai_chat_workers/specialists/crewai_base_specialist.py @@ -50,15 +50,20 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor): self._task_pydantic_outputs: Dict[str, Type[BaseModel]] = {} self._task_state_names: Dict[str, str] = {} - # Processed configurations + # State-Result relations (for adding / restoring information to / from history + self._state_result_relations: Dict[str, str] = {} + + # Process configurations self._config = 
cache_manager.crewai_processed_config_cache.get_specialist_config(tenant_id, specialist_id) self._config_task_agents() self._config_pydantic_outputs() self._instantiate_crew_assets() self._instantiate_specialist() + self._config_state_result_relations() # Retrieve history self._cached_session = cache_manager.chat_session_cache.get_cached_session(self.session_id) + self._restore_state_from_history() # Format history for the prompt self._formatted_history = self._generate_formatted_history() @@ -106,6 +111,19 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor): """Configure the task pydantic outputs by adding task-output combinations. Use _add_pydantic_output()""" raise NotImplementedError + def _add_state_result_relation(self, state_name: str, result_name: str = None): + """Add a state-result relation to the specialist. This is used to add information to the history + If result_name is None, the state name is used as the result name. (default behavior) + """ + if not result_name: + result_name = state_name + self._state_result_relations[state_name] = result_name + + @abstractmethod + def _config_state_result_relations(self): + """Configure the state-result relations by adding state-result combinations. 
Use _add_state_result_relation()""" + raise NotImplementedError + @property def task_pydantic_outputs(self): return self._task_pydantic_outputs @@ -330,6 +348,27 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor): return formatted_context, citations + def _update_specialist_results(self, specialist_results: SpecialistResult) -> SpecialistResult: + """Update the specialist results with the latest state information""" + update_data = {} + state_dict = self.flow.state.model_dump() + for state_name, result_name in self._state_result_relations.items(): + if state_name in state_dict and state_dict[state_name] is not None: + update_data[result_name] = state_dict[state_name] + + return specialist_results.model_copy(update=update_data) + + def _restore_state_from_history(self): + """Restore the state from the history""" + if not self._cached_session.interactions: + return + last_interaction = self._cached_session.interactions[-1] + if not last_interaction.specialist_results: + return + for state_name, result_name in self._state_result_relations.items(): + if result_name in last_interaction.specialist_results: + setattr(self.flow.state, state_name, last_interaction.specialist_results[result_name]) + @abstractmethod def execute(self, arguments: SpecialistArguments, formatted_context: str, citations: List[int]) -> SpecialistResult: raise NotImplementedError @@ -356,8 +395,10 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor): "detailed_query": detailed_query, "citations": citations, } - final_result = result.model_copy(update=modified_result) + intermediate_result = result.model_copy(update=modified_result) else: - final_result = self.execute(arguments, "", []) + intermediate_result = self.execute(arguments, "", []) + + final_result = self._update_specialist_results(intermediate_result) return final_result diff --git a/eveai_chat_workers/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1_3.py 
b/eveai_chat_workers/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1_3.py index 367f84d..f3a5486 100644 --- a/eveai_chat_workers/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1_3.py +++ b/eveai_chat_workers/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1_3.py @@ -22,6 +22,7 @@ from common.services.interaction.specialist_services import SpecialistServices from common.extensions import cache_manager from eveai_chat_workers.definitions.language_level.language_level_v1_0 import LANGUAGE_LEVEL from eveai_chat_workers.definitions.tone_of_voice.tone_of_voice_v1_0 import TONE_OF_VOICE +from common.utils.eveai_exceptions import EveAISpecialistExecutionError class SpecialistExecutor(CrewAIBaseSpecialistExecutor): @@ -53,6 +54,13 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): def _config_pydantic_outputs(self): self._add_pydantic_output("traicie_ko_criteria_interview_definition_task", KOQuestions, "ko_questions") + def _config_state_result_relations(self): + self._add_state_result_relation("ko_criteria_questions") + self._add_state_result_relation("ko_criteria_scores") + self._add_state_result_relation("competency_questions") + self._add_state_result_relation("competency_scores") + self._add_state_result_relation("personal_contact_data") + def _instantiate_specialist(self): verbose = self.tuning @@ -89,16 +97,14 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): match specialist_phase: case "initial": results = self.execute_initial_state(arguments, formatted_context, citations) - case "ko_questions": - contact_form = cache_manager.specialist_forms_config_cache.get_config("PERSONAL_CONTACT_FORM", "1.0") - results = SpecialistResult.create_for_type(self.type, self.type_version, - answer=f"We hebben de antwoorden op de KO criteria verwerkt. Je bent een geschikte kandidaat. 
Kan je je contactegevens doorgeven?", - form_request=contact_form, - phase="personal_contact_data") + case "ko_question_evaluation": + results = self.execute_ko_question_evaluation(arguments, formatted_context, citations) case "personal_contact_data": - results = SpecialistResult.create_for_type(self.type, self.type_version, - answer=f"We hebben de contactgegevens verwerkt. We nemen zo snel mogelijk contact met je op.", - phase="candidate_selected") + results = self.execute_personal_contact_data(arguments, formatted_context, citations) + case "no_valid_candidate": + results = self.execute_no_valid_candidate(arguments, formatted_context, citations) + case "candidate_selected": + results = self.execute_candidate_selected(arguments, formatted_context, citations) self.log_tuning(f"Traicie Selection Specialist execution ended", {"Results": results.model_dump() if results else "No info"}) @@ -108,18 +114,30 @@ def execute_initial_state(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult: self.log_tuning("Traicie Selection Specialist initial_state_execution started", {}) - knockout_competencies = [ - { - "title": c["title"], - "description": c["description"] - } - for c in self.specialist.configuration.get("competencies", []) - if c.get("is_knockout") is True - ] + current_app.logger.debug(f"Specialist Competencies:\n{self.specialist.configuration.get('competencies', [])}") - # Convert TONE_OF_VOICE en LANGUAGE_LEVEL lists tp strings usable by the LLM - tone_of_voice_str = "\n\n".join([f"Name: {item['name']}\nDescription: {item['description']}\nWhen to use: {item['when_to_use']}" for item in TONE_OF_VOICE]) - language_level_str = "\n\n".join([f"Name: {item['name']}\nDescription: {item['description']}\nCEFR level: {item['cefr_level']}\nIdeal Target Audience: {item['ideal_audience']}" for item in LANGUAGE_LEVEL]) + ko_competencies = [] + for competency in 
self.specialist.configuration.get("competencies", []): + if competency["is_knockout"] is True and competency["assess"] is True: + current_app.logger.debug(f"Assessable Knockout competency: {competency}") + ko_competencies.append({"title": competency["title"], "description": competency["description"]}) + + tone_of_voice = self.specialist.configuration.get('tone_of_voice', 'Professional & Neutral') + selected_tone_of_voice = next( + (item for item in TONE_OF_VOICE if item["name"] == tone_of_voice), + None # fallback if not found + ) + current_app.logger.debug(f"Selected tone of voice: {selected_tone_of_voice}") + tone_of_voice_context = f"{selected_tone_of_voice['description']}" + + language_level = self.specialist.configuration.get('language_level', 'Standard') + selected_language_level = next( + (item for item in LANGUAGE_LEVEL if item["name"] == language_level), + None + ) + current_app.logger.debug(f"Selected language level: {selected_language_level}") + language_level_context = (f"{selected_language_level['description']}, " + f"corresponding to CEFR level {selected_language_level['cefr_level']}") flow_inputs = { "region": arguments.region, @@ -127,11 +145,11 @@ "start_date": arguments.start_date, "language": arguments.language, "interaction_mode": arguments.interaction_mode, - 'tone_of_voice': self.specialist.configuration.get('tone_of_voice', 'Professional & Neutral'), - 'tone_of_voice_context': tone_of_voice_str, - 'language_level': self.specialist.configuration.get('language_level', 'Standard'), - 'language_level_context': language_level_str, - 'ko_criteria': knockout_competencies, + 'tone_of_voice': tone_of_voice, + 'tone_of_voice_context': tone_of_voice_context, + 'language_level': language_level, + 'language_level_context': language_level_context, + 'ko_criteria': ko_competencies, } flow_results = self.flow.kickoff(inputs=flow_inputs) @@ -162,10 +180,69 @@ class 
SpecialistExecutor(CrewAIBaseSpecialistExecutor): results = SpecialistResult.create_for_type(self.type, self.type_version, answer=f"We starten met een aantal KO Criteria vragen", form_request=ko_form, - phase="ko_questions") + phase="ko_question_evaluation") return results + def execute_ko_question_evaluation(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult: + self.log_tuning("Traicie Selection Specialist ko_question_evaluation started", {}) + + # Check if the form has been returned (it should) + if not arguments.form_values: + raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id, "No form values returned") + current_app.logger.debug(f"Form values: {arguments.form_values}") + + # Load the previous KO Questions + previous_ko_questions = self.flow.state.ko_criteria_questions + current_app.logger.debug(f"Previous KO Questions: {previous_ko_questions}") + + # Evaluate KO Criteria + evaluation = "positive" + for criterium, answer in arguments.form_values.items(): + for qa in previous_ko_questions: + if qa.get("title") == criterium: + if qa.get("answer_positive") != answer: + evaluation = "negative" + break + if evaluation == "negative": + break + + if evaluation == "negative": + results = SpecialistResult.create_for_type(self.type, self.type_version, + answer=f"We hebben de antwoorden op de KO criteria verwerkt. Je voldoet jammer genoeg niet aan de minimale vereisten voor deze job.", + form_request=None, + phase="no_valid_candidate") + else: + # Check if answers to questions are positive + contact_form = cache_manager.specialist_forms_config_cache.get_config("PERSONAL_CONTACT_FORM", "1.0") + results = SpecialistResult.create_for_type(self.type, self.type_version, + answer=f"We hebben de antwoorden op de KO criteria verwerkt. Je bent een geschikte kandidaat. 
Kan je je contactgegevens doorgeven?", + form_request=contact_form, + phase="personal_contact_data") + + return results + + def execute_personal_contact_data(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult: + self.log_tuning("Traicie Selection Specialist personal_contact_data started", {}) + + results = SpecialistResult.create_for_type(self.type, self.type_version, + answer=f"We hebben de contactgegevens verwerkt. We nemen zo snel mogelijk contact met je op.", + phase="candidate_selected") + return results + + def execute_no_valid_candidate(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult: + self.log_tuning("Traicie Selection Specialist no_valid_candidate started", {}) + return SpecialistResult.create_for_type(self.type, self.type_version, + answer=f"Je voldoet jammer genoeg niet aan de minimale vereisten voor deze job. Maar solliciteer gerust voor één van onze andere jobs.", + phase="no_valid_candidate") + + def execute_candidate_selected(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult: + self.log_tuning("Traicie Selection Specialist candidate_selected started", {}) + results = SpecialistResult.create_for_type(self.type, self.type_version, + answer=f"We hebben je contactgegevens verwerkt. We nemen zo snel mogelijk contact met je op.", + phase="candidate_selected") + return results + class SelectionInput(BaseModel): region: str = Field(..., alias="region") diff --git a/requirements.txt b/requirements.txt index 354743b..47d356a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -94,3 +94,4 @@ scaleway~=2.9.0 html2text~=2025.4.15 markdown~=3.8 python-json-logger~=2.0.7 +qrcode[pil]==8.2 \ No newline at end of file