diff --git a/config/agents/traicie/TRAICIE_RECRUITER_AGENT/1.0.0.yaml b/config/agents/traicie/TRAICIE_RECRUITER_AGENT/1.0.0.yaml
index 752a57b..be65a05 100644
--- a/config/agents/traicie/TRAICIE_RECRUITER_AGENT/1.0.0.yaml
+++ b/config/agents/traicie/TRAICIE_RECRUITER_AGENT/1.0.0.yaml
@@ -16,7 +16,7 @@ backstory: >
AI-driven sourcing. You’re more than a recruiter—you’re a trusted advisor, a brand ambassador, and a connector of
people and purpose.
{custom_backstory}
-full_model_name: "mistral.magistral-medium-latest"
+full_model_name: "mistral.mistral-medium-latest"
temperature: 0.3
metadata:
author: "Josako"
diff --git a/config/agents/traicie/TRAICIE_RECRUITER_AGENT/1.0.1.yaml b/config/agents/traicie/TRAICIE_RECRUITER_AGENT/1.0.1.yaml
index a0c8370..663666b 100644
--- a/config/agents/traicie/TRAICIE_RECRUITER_AGENT/1.0.1.yaml
+++ b/config/agents/traicie/TRAICIE_RECRUITER_AGENT/1.0.1.yaml
@@ -16,7 +16,7 @@ backstory: >
AI-driven sourcing. You’re more than a recruiter—you’re a trusted advisor, a brand ambassador, and a connector of
people and purpose.
{custom_backstory}
-full_model_name: "mistral.magistral-medium-latest"
+full_model_name: "mistral.mistral-medium-latest"
temperature: 0.3
metadata:
author: "Josako"
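
The config change above swaps the Magistral reasoning model for the standard Mistral Medium model; `full_model_name` follows a `provider.model` convention that is later handed to `get_crewai_llm` (see the `crewai_base_specialist.py` hunk below). A minimal sketch of how such a dotted name could be split, assuming a hypothetical resolver (the real helper is not shown in this diff):

```python
# Hypothetical sketch only: split_full_model_name is not part of the codebase shown here.
from typing import Tuple


def split_full_model_name(full_model_name: str) -> Tuple[str, str]:
    """Split a 'provider.model' string into its provider and model parts."""
    provider, _, model = full_model_name.partition(".")
    if not model:
        raise ValueError(f"Expected 'provider.model', got '{full_model_name}'")
    return provider, model


provider, model = split_full_model_name("mistral.mistral-medium-latest")
assert provider == "mistral"
assert model == "mistral-medium-latest"
```
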
diff --git a/config/tasks/traicie/TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK/1.0.1.yaml b/config/tasks/traicie/TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK/1.0.1.yaml
index f1c1aa7..74a6cb2 100644
--- a/config/tasks/traicie/TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK/1.0.1.yaml
+++ b/config/tasks/traicie/TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK/1.0.1.yaml
@@ -8,8 +8,8 @@ task_description: >
- A short (1 sentence), closed-ended question (Yes / No) to ask the recruitment candidate. Use your experience to ask a question that
enables us to verify compliancy to the criterium.
- - A set of 2 short answers (1 small sentence each) to that question (positive answer / negative answer), from the
- candidates perspective.
+ - A set of 2 short answers (one short sentence of about 10 words each) to that question (positive answer / negative answer), from the
+ candidate's perspective. Do not just repeat the wording already used in the question.
The positive answer will result in a positive evaluation of the criterium, the negative answer in a negative evaluation
of the criterium. Try to avoid just using Yes / No as positive and negative answers.
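
The revised task prompt asks for answer options of roughly ten words that do not echo the question. Those options feed the `KOQuestion`/`KOQuestions` models used further down in this diff. A sketch of what one generated entry might look like, assuming `title`, `question`, `answer_positive`, and `answer_negative` are the relevant fields (the attribute names referenced in the specialist code below):

```python
# Illustrative only: the field set is inferred from attributes used elsewhere in this diff.
from pydantic import BaseModel


class KOQuestion(BaseModel):
    title: str
    question: str
    answer_positive: str
    answer_negative: str


example = KOQuestion(
    title="Driving licence B",
    question="Do you hold a valid category B driving licence?",
    # roughly 10-word answers that do not simply repeat the question's wording
    answer_positive="I am fully licensed and regularly drive for work.",
    answer_negative="I currently have no licence and rely on public transport.",
)
```
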
diff --git a/config/type_defs/catalog_types.py b/config/type_defs/catalog_types.py
index b543870..e0d687b 100644
--- a/config/type_defs/catalog_types.py
+++ b/config/type_defs/catalog_types.py
@@ -4,7 +4,7 @@ CATALOG_TYPES = {
"name": "Standard Catalog",
"description": "A Catalog with information in Evie's Library, to be considered as a whole",
},
- "TRAICIE_RQC": {
+ "TRAICIE_ROLE_DEFINITION_CATALOG": {
"name": "Role Definition Catalog",
"description": "A Catalog with information about roles, to be considered as a whole",
"partner": "traicie"
diff --git a/eveai_app/templates/interaction/edit_asset.html b/eveai_app/templates/interaction/edit_asset.html
index d219df4..f0f3444 100644
--- a/eveai_app/templates/interaction/edit_asset.html
+++ b/eveai_app/templates/interaction/edit_asset.html
@@ -26,7 +26,7 @@
Type Version: {{ asset.type_version }}
File Type: {{ asset.file_type }}
-                File Size: {{ asset.file_size or 'N/A' }} bytes
+                File Size: {{ asset.file_size or 'N/A' }} MiB
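
The label now reads MiB while the template still prints `asset.file_size` directly. If `file_size` is stored in bytes (an assumption, not confirmed by this diff), a small Jinja filter could do the conversion; a sketch, with `app` standing in for the Flask application object:

```python
# Sketch under assumptions: `app` is the Flask app and file_size is stored in bytes.
# The template could then render {{ asset.file_size | mib }} MiB instead of the raw value.
from flask import Flask

app = Flask(__name__)


@app.template_filter("mib")
def to_mib(size_in_bytes):
    """Format a byte count as mebibytes with two decimals; pass missing values through."""
    if size_in_bytes is None:
        return "N/A"
    return f"{size_in_bytes / (1024 * 1024):.2f}"
```
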
diff --git a/eveai_app/templates/interaction/view_specialist_magic_link_urls.html b/eveai_app/templates/interaction/view_specialist_magic_link_urls.html
new file mode 100644
index 0000000..2fd666f
--- /dev/null
+++ b/eveai_app/templates/interaction/view_specialist_magic_link_urls.html
@@ -0,0 +1,41 @@
+{% extends 'base.html' %}
+{% from "macros.html" import render_field %}
+
+{% block title %}Specialist Magic Link URLs{% endblock %}
+
+{% block content_title %}Specialist Magic Link URLs{% endblock %}
+{% block content_description %}View URL and QR Code for a Magic Link{% endblock %}
+
+{% block content %}
+
+{% endblock %}
+
+{% block content_footer %}
+
+{% endblock %}
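
The template body is not reproduced in this hunk; judging by the new view below, it is expected to show the readonly `chat_client_url` field and an image whose `src` is the base64 PNG data URI stored in `qr_code_url`. A small, hypothetical check of that data-URI format:

```python
# Hypothetical helper: verifies that a data URI (e.g. form.qr_code_url.data) decodes to a PNG.
import base64


def is_png_data_uri(data_uri: str) -> bool:
    prefix = "data:image/png;base64,"
    if not data_uri.startswith(prefix):
        return False
    raw = base64.b64decode(data_uri[len(prefix):])
    return raw.startswith(b"\x89PNG\r\n\x1a\n")  # PNG file signature
```
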
diff --git a/eveai_app/views/document_views.py b/eveai_app/views/document_views.py
index 50eccd1..0d3e208 100644
--- a/eveai_app/views/document_views.py
+++ b/eveai_app/views/document_views.py
@@ -392,7 +392,7 @@ def add_document():
flash(f'Processing on document {new_doc.name}, version {new_doc_vers.id} started. Task ID: {task_id}.',
'success')
- return redirect(prefixed_url_for('document_bp.documents'))
+ return redirect(prefixed_url_for('document_bp.documents_processing'))
except EveAIException as e:
flash(str(e), 'error')
@@ -451,7 +451,7 @@ def add_url():
flash(f'Processing on document {new_doc.name}, version {new_doc_vers.id} started. Task ID: {task_id}.',
'success')
- return redirect(prefixed_url_for('document_bp.documents'))
+ return redirect(prefixed_url_for('document_bp.documents_processing'))
except EveAIException as e:
current_app.logger.error(f"Error adding document: {str(e)}")
diff --git a/eveai_app/views/interaction_forms.py b/eveai_app/views/interaction_forms.py
index 6d9259c..783f2df 100644
--- a/eveai_app/views/interaction_forms.py
+++ b/eveai_app/views/interaction_forms.py
@@ -134,8 +134,7 @@ class EditSpecialistMagicLinkForm(DynamicFormBase):
render_kw={'readonly': True})
specialist_id = IntegerField('Specialist', validators=[DataRequired()], render_kw={'readonly': True})
specialist_name = StringField('Specialist Name', validators=[DataRequired()], render_kw={'readonly': True})
- chat_client_url = StringField('Chat Client URL', validators=[Optional()], render_kw={'readonly': True})
- qr_code_url = StringField('QR Code', validators=[Optional()], render_kw={'readonly': True})
+
tenant_make_id = SelectField('Tenant Make', validators=[Optional()], coerce=int)
valid_from = DateField('Valid From', id='form-control datepicker', validators=[Optional()])
valid_to = DateField('Valid To', id='form-control datepicker', validators=[Optional()])
@@ -158,5 +157,14 @@ class EditSpecialistMagicLinkForm(DynamicFormBase):
self.tenant_make_id.choices = [(0, 'None')] + [(make.id, make.name) for make in tenant_makes]
+class ViewSpecialistMagicLinkURLsForm(FlaskForm):
+ name = StringField('Name', validators=[DataRequired(), Length(max=50)])
+ description = TextAreaField('Description', validators=[Optional()])
+ magic_link_code = StringField('Magic Link Code', validators=[DataRequired(), Length(max=55)], render_kw={'readonly': True})
+
+ chat_client_url = StringField('Chat Client URL', validators=[Optional()], render_kw={'readonly': True})
+ qr_code_url = StringField('QR Code', validators=[Optional()], render_kw={'readonly': True})
+
+
diff --git a/eveai_app/views/interaction_views.py b/eveai_app/views/interaction_views.py
index cb0a60c..4eb17a0 100644
--- a/eveai_app/views/interaction_views.py
+++ b/eveai_app/views/interaction_views.py
@@ -29,7 +29,7 @@ from common.utils.view_assistants import form_validation_failed, prepare_table_f
from .interaction_forms import (SpecialistForm, EditSpecialistForm, EditEveAIAgentForm, EditEveAITaskForm,
EditEveAIToolForm, ExecuteSpecialistForm,
- SpecialistMagicLinkForm, EditSpecialistMagicLinkForm)
+ SpecialistMagicLinkForm, EditSpecialistMagicLinkForm, ViewSpecialistMagicLinkURLsForm)
from eveai_app.views.list_views.interaction_list_views import (get_specialists_list_view, get_assets_list_view,
get_magic_links_list_view, get_chat_sessions_list_view,
@@ -648,52 +648,6 @@ def edit_specialist_magic_link(specialist_magic_link_id):
else:
form.tenant_make_id.data = specialist_ml.tenant_make_id
- # Set the chat client URL
- tenant_id = session.get('tenant').get('id')
- chat_client_prefix = current_app.config.get('CHAT_CLIENT_PREFIX', 'chat_client/chat/')
- base_url = request.url_root
- magic_link_code = specialist_ml.magic_link_code
-
- # Parse the URL om poortinformatie te behouden als deze afwijkt van de standaard
- url_parts = request.url.split('/')
- host_port = url_parts[2] # Dit bevat zowel hostname als poort indien aanwezig
-
- # Generate the full URL for chat client with magic link code
- chat_client_url = f"{request.scheme}://{host_port}/{chat_client_prefix}{magic_link_code}"
- form.chat_client_url.data = chat_client_url
-
- # Generate QR code as data URI for direct embedding in HTML
- try:
- import qrcode
- import io
- import base64
-
- # Generate QR code as PNG for better compatibility
- qr = qrcode.QRCode(
- version=1,
- error_correction=qrcode.constants.ERROR_CORRECT_L,
- box_size=10,
- border=4
- )
- qr.add_data(chat_client_url)
- qr.make(fit=True)
-
- # Generate PNG image in memory
- img = qr.make_image(fill_color="black", back_color="white")
- buffer = io.BytesIO()
- img.save(buffer, format='PNG')
- img_data = buffer.getvalue()
-
- # Create data URI for direct embedding in HTML
- img_base64 = base64.b64encode(img_data).decode('utf-8')
- data_uri = f"data:image/png;base64,{img_base64}"
-
- # Store the data URI in the form data
- form.qr_code_url.data = data_uri
- except Exception as e:
- current_app.logger.error(f"Failed to generate QR code: {str(e)}")
- form.qr_code_url.data = "Error generating QR code"
-
if form.validate_on_submit():
# Update the basic fields
form.populate_obj(specialist_ml)
@@ -722,6 +676,61 @@ def edit_specialist_magic_link(specialist_magic_link_id):
return render_template('interaction/edit_specialist_magic_link.html', form=form)
+@interaction_bp.route('/view_specialist_magic_link_urls/<int:specialist_magic_link_id>', methods=['GET'])
+@roles_accepted('Super User', 'Partner Admin', 'Tenant Admin')
+def view_specialist_magic_link_urls(specialist_magic_link_id):
+ specialist_ml = SpecialistMagicLink.query.get_or_404(specialist_magic_link_id)
+ form = ViewSpecialistMagicLinkURLsForm(obj=specialist_ml)
+
+ # Set the chat client URL
+ tenant_id = session.get('tenant').get('id')
+ chat_client_prefix = current_app.config.get('CHAT_CLIENT_PREFIX', 'chat_client/chat/')
+ base_url = request.url_root
+ magic_link_code = specialist_ml.magic_link_code
+
+ # Parse the URL to preserve port information if it differs from the default
+ url_parts = request.url.split('/')
+ host_port = url_parts[2] # This contains both hostname and port, if present
+
+ # Generate the full URL for chat client with magic link code
+ chat_client_url = f"{request.scheme}://{host_port}/{chat_client_prefix}{magic_link_code}"
+ form.chat_client_url.data = chat_client_url
+
+ # Generate QR code as data URI for direct embedding in HTML
+ try:
+ import qrcode
+ import io
+ import base64
+
+ # Generate QR code as PNG for better compatibility
+ qr = qrcode.QRCode(
+ version=1,
+ error_correction=qrcode.constants.ERROR_CORRECT_L,
+ box_size=10,
+ border=4
+ )
+ qr.add_data(chat_client_url)
+ qr.make(fit=True)
+
+ # Generate PNG image in memory
+ img = qr.make_image(fill_color="black", back_color="white")
+ buffer = io.BytesIO()
+ img.save(buffer, format='PNG')
+ img_data = buffer.getvalue()
+
+ # Create data URI for direct embedding in HTML
+ img_base64 = base64.b64encode(img_data).decode('utf-8')
+ data_uri = f"data:image/png;base64,{img_base64}"
+
+ # Store the data URI in the form data
+ form.qr_code_url.data = data_uri
+ except Exception as e:
+ current_app.logger.error(f"Failed to generate QR code: {str(e)}")
+ form.qr_code_url.data = "Error generating QR code"
+
+ return render_template('interaction/view_specialist_magic_link_urls.html', form=form)
+
+
@interaction_bp.route('/specialist_magic_links', methods=['GET', 'POST'])
@roles_accepted('Super User', 'Partner Admin', 'Tenant Admin')
def specialist_magic_links():
@@ -743,6 +752,9 @@ def handle_specialist_magic_link_selection():
if action == "edit_specialist_magic_link":
return redirect(prefixed_url_for('interaction_bp.edit_specialist_magic_link',
specialist_magic_link_id=specialist_ml_id))
+ if action == "view_specialist_magic_link_urls":
+ return redirect(prefixed_url_for('interaction_bp.view_specialist_magic_link_urls',
+ specialist_magic_link_id=specialist_ml_id))
return redirect(prefixed_url_for('interaction_bp.specialists'))
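
The QR generation now lives inline in `view_specialist_magic_link_urls`. Purely as a sketch (not part of this diff), the same `qrcode`/`io`/`base64` calls could be pulled into a helper, which would keep the view short and make the data-URI output easy to test in isolation:

```python
# Refactor sketch using the same calls as the view above; not part of this change set.
import base64
import io

import qrcode


def generate_qr_data_uri(url: str) -> str:
    """Return a data URI containing a PNG QR code for the given URL."""
    qr = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=10,
        border=4,
    )
    qr.add_data(url)
    qr.make(fit=True)

    img = qr.make_image(fill_color="black", back_color="white")
    buffer = io.BytesIO()
    img.save(buffer, format="PNG")
    img_base64 = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return f"data:image/png;base64,{img_base64}"
```
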
diff --git a/eveai_app/views/list_views/interaction_list_views.py b/eveai_app/views/list_views/interaction_list_views.py
index 0a69428..e4586ef 100644
--- a/eveai_app/views/list_views/interaction_list_views.py
+++ b/eveai_app/views/list_views/interaction_list_views.py
@@ -132,7 +132,8 @@ def get_magic_links_list_view():
# Action definitions
actions = [
{'value': 'edit_specialist_magic_link', 'text': 'Edit Magic Link', 'class': 'btn-primary', 'requiresSelection': True},
- {'value': 'create_specialist_magic_link', 'text': 'Create Magic Link', 'class': 'btn-success', 'position': 'right', 'requiresSelection': False}
+ {'value': 'view_specialist_magic_link_urls', 'text': 'View Magic Link URLs', 'class': 'btn-secondary', 'requiresSelection': True},
+ {'value': 'create_specialist_magic_link', 'text': 'Create Magic Link', 'class': 'btn-success', 'position': 'right', 'requiresSelection': False},
]
# Initial sort configuration
diff --git a/eveai_chat_workers/outputs/traicie/knockout_questions/knockout_questions_v1_0.py b/eveai_chat_workers/outputs/traicie/knockout_questions/knockout_questions_v1_0.py
index 3d55242..5754b31 100644
--- a/eveai_chat_workers/outputs/traicie/knockout_questions/knockout_questions_v1_0.py
+++ b/eveai_chat_workers/outputs/traicie/knockout_questions/knockout_questions_v1_0.py
@@ -43,3 +43,11 @@ class KOQuestions(BaseModel):
"""Get the list of KOQuestion objects"""
return self.ko_questions
+ def get_by_title(self, title: str) -> Optional[KOQuestion]:
+ return next((q for q in self.ko_questions if q.title == title), None)
+
+ def get_next_by_title(self, title: str) -> Optional[KOQuestion]:
+ for idx, q in enumerate(self.ko_questions):
+ if q.title == title:
+ return self.ko_questions[idx + 1] if idx + 1 < len(self.ko_questions) else None
+ return None
\ No newline at end of file
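
The two new helpers drive the question-by-question flow in the selection specialist below: `get_by_title` fetches the criterion currently being asked, and `get_next_by_title` returns the following one, or `None` when the list is exhausted. A usage sketch with made-up data, assuming `title`, `question`, and the two answer fields are the only required fields on `KOQuestion`:

```python
# Usage sketch with hypothetical questions; the required-field assumption may not hold.
from eveai_chat_workers.outputs.traicie.knockout_questions.knockout_questions_v1_0 import (
    KOQuestion,
    KOQuestions,
)

questions = KOQuestions(ko_questions=[
    KOQuestion(title="Shift work", question="Are you available for shift work?",
               answer_positive="Yes, rotating shifts fit my personal schedule well.",
               answer_negative="No, I can only work regular daytime hours."),
    KOQuestion(title="Forklift certificate", question="Do you hold a valid forklift certificate?",
               answer_positive="Yes, my certificate is valid and up to date.",
               answer_negative="No, I have never operated a forklift."),
])

assert questions.get_by_title("Shift work").question == "Are you available for shift work?"
assert questions.get_next_by_title("Shift work").title == "Forklift certificate"
assert questions.get_next_by_title("Forklift certificate") is None  # last question
assert questions.get_by_title("Unknown") is None
```
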
diff --git a/eveai_chat_workers/specialists/crewai_base_specialist.py b/eveai_chat_workers/specialists/crewai_base_specialist.py
index 3bb1087..12612a6 100644
--- a/eveai_chat_workers/specialists/crewai_base_specialist.py
+++ b/eveai_chat_workers/specialists/crewai_base_specialist.py
@@ -152,6 +152,7 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
agent_backstory = agent_config.get('backstory', '').replace('{custom_backstory}', agent.backstory or '')
agent_backstory = self._replace_system_variables(agent_backstory)
agent_full_model_name = agent_config.get('full_model_name', 'mistral.mistral-large-latest')
+ current_app.logger.debug(f"Full model name for {agent.type}: {agent_full_model_name}")
agent_temperature = agent_config.get('temperature', 0.3)
llm = get_crewai_llm(agent_full_model_name, agent_temperature)
if not llm:
@@ -331,6 +332,7 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
for state_name, result_name in self._state_result_relations.items():
if result_name in last_interaction.specialist_results:
setattr(self.flow.state, state_name, last_interaction.specialist_results[result_name])
+ # TODO: A dict or JSON is returned here each time, not a Pydantic model?
# Initialize the standard state values
self.flow.state.answer = None
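
The new TODO notes that values restored from `specialist_results` arrive as plain dicts or JSON rather than Pydantic models. One possible direction, offered only as a sketch (not part of this diff), is to validate each restored value against the annotated type of the corresponding state field, which the `SelectionFlowState` declarations later in this diff would support:

```python
# Sketch of one way to address the TODO; assumes the flow state is a Pydantic v2 model.
import json

from pydantic import BaseModel, TypeAdapter


def coerce_state_value(state: BaseModel, state_name: str, raw):
    """Validate a restored dict/JSON value against the declared type of the state field."""
    annotation = type(state).model_fields[state_name].annotation
    if isinstance(raw, str):
        raw = json.loads(raw)
    return TypeAdapter(annotation).validate_python(raw)
```

The restore loop above could then call `setattr(self.flow.state, state_name, coerce_state_value(self.flow.state, state_name, last_interaction.specialist_results[result_name]))`.
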
diff --git a/eveai_chat_workers/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1_4.py b/eveai_chat_workers/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1_4.py
index e081a60..2be50d4 100644
--- a/eveai_chat_workers/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1_4.py
+++ b/eveai_chat_workers/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1_4.py
@@ -1,4 +1,5 @@
import json
+import random
from datetime import date
from typing import Optional, List, Dict, Any
@@ -33,6 +34,17 @@ KO_CRITERIA_NOT_MET_MESSAGE = ("Thank you for answering our questions! We proces
"not comply with the minimum requirements for this job. Therefor, we stop this"
"selection procedure")
KO_CRITERIA_MET_MESSAGE = "We processed your answers with a positive result."
+KO_CRITERIA_NEXT_MESSAGES = [
+ "Thank you for your answer. Here's the next question.",
+ "Your answer fits our needs. We have one more question to ask you.",
+ "Positive so far! Here's a follow-up question.",
+ "Great, that’s just what we were hoping for. Let’s continue with another question.",
+ "Appreciate your reply! Here's the next one.",
+ "Thanks for the input. Let’s move on to the next question.",
+ "That’s exactly what we needed to hear. Here comes the next question.",
+ "Looks promising! Let’s continue with another quick check.",
+ "Thanks! Here's another point we'd like to clarify."
+]
RQC_MESSAGE = "You are well suited for this job."
CONTACT_DATA_QUESTION = ("Are you willing to provide us with your contact data, so we can contact you to continue "
"the selection process?")
@@ -83,10 +95,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
def _config_state_result_relations(self):
self._add_state_result_relation("rag_output")
- self._add_state_result_relation("ko_criteria_questions")
- self._add_state_result_relation("ko_criteria_answers")
- self._add_state_result_relation("competency_questions")
- self._add_state_result_relation("competency_scores")
+ self._add_state_result_relation("ko_criteria_scores")
+ self._add_state_result_relation("current_ko_criterium")
+ self._add_state_result_relation("current_ko_criterium_idx")
self._add_state_result_relation("personal_contact_data")
self._add_state_result_relation("contact_time_prefs")
@@ -175,25 +186,10 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
answer = initialisation_message
ko_questions = self._get_ko_questions()
- fields = {}
- for ko_question in ko_questions.ko_questions:
- fields[ko_question.title] = {
- "name": ko_question.title,
- "description": ko_question.title,
- "context": ko_question.question,
- "type": "options",
- "required": True,
- "allowed_values": [ko_question.answer_positive, ko_question.answer_negative]
- }
- ko_form = {
- "type": "KO_CRITERIA_FORM",
- "version": "1.0.0",
- "name": "Starter Questions",
- "icon": "verified",
- "fields": fields,
- }
- ko_form = TranslationServices.translate_config(self.tenant_id, ko_form, "fields", arguments.language)
+ current_ko_criterium = ko_questions.ko_questions[0].title
+ current_ko_criterium_idx = 0
+ ko_form = self._prepare_ko_question_form(ko_questions, current_ko_criterium, arguments.language)
rag_answer = self._check_and_execute_rag(arguments, formatted_context, citations)
if rag_answer:
@@ -202,6 +198,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
else:
answer = rag_answer.answer
+ self.flow.state.current_ko_criterium = current_ko_criterium
+ self.flow.state.current_ko_criterium_idx = current_ko_criterium_idx
+ self.flow.state.ko_criteria_scores = []
self.flow.state.answer = answer
self.flow.state.phase = "ko_question_evaluation"
self.flow.state.form_request = ko_form
@@ -219,21 +218,40 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id,
"No form values returned")
+ ko_questions = self._get_ko_questions()
+
+ # DEBUG CHECKS: Validate the type of the result
+ current_app.logger.debug(f"KO Questions result type: {type(ko_questions)}")
+ current_app.logger.debug(f"Is KOQuestions instance: {isinstance(ko_questions, KOQuestions)}")
+ current_app.logger.debug(f"KO Questions model dump: {ko_questions.model_dump()}")
+ current_app.logger.debug(
+ f"Number of ko_questions: {len(ko_questions.ko_questions) if hasattr(ko_questions, 'ko_questions') else 'No ko_questions attribute'}")
+
+ # Extra check: validate each item in the list
+ if hasattr(ko_questions, 'ko_questions') and ko_questions.ko_questions:
+ current_app.logger.debug(f"First question type: {type(ko_questions.ko_questions[0])}")
+ current_app.logger.debug(
+ f"First question is KOQuestion: {isinstance(ko_questions.ko_questions[0], KOQuestion)}")
+ current_app.logger.debug(
+ f"First question data: {ko_questions.ko_questions[0].model_dump() if hasattr(ko_questions.ko_questions[0], 'model_dump') else ko_questions.ko_questions[0]}")
+
+ previous_idx = self.flow.state.current_ko_criterium_idx
# Load the previous KO Questions
- previous_ko_questions = self._get_ko_questions().ko_questions
+ previous_ko_question = ko_questions.ko_questions[previous_idx]
# Evaluate KO Criteria
evaluation = "positive"
- for criterium, answer in arguments.form_values.items():
- for qa in previous_ko_questions:
- if qa.title == criterium:
- if TranslationServices.translate(self.tenant_id, qa.answer_positive, arguments.language) != answer:
- evaluation = "negative"
- break
- if evaluation == "negative":
- break
+ criterium, answer = next(iter(arguments.form_values.items()))
+ if TranslationServices.translate(self.tenant_id, previous_ko_question.answer_positive, arguments.language) != answer:
+ evaluation = "negative"
- self.flow.state.ko_criteria_answers = arguments.form_values
+ score = SelectionKOCriteriumScore(
+ criterium=criterium,
+ answer=answer,
+ score=1 if evaluation == "positive" else 0,
+ )
+
+ self.flow.state.ko_criteria_scores.append(score)
if evaluation == "negative":
answer = TranslationServices.translate(self.tenant_id, KO_CRITERIA_NOT_MET_MESSAGE, arguments.language)
@@ -243,16 +261,35 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
results = SelectionResult.create_for_type(self.type, self.type_version)
else:
- answer = TranslationServices.translate(self.tenant_id, KO_CRITERIA_MET_MESSAGE, arguments.language)
rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
- if rag_output:
- answer = f"{answer}\n\n{rag_output.answer}"
- answer = (f"{answer}\n\n"
- f"{TranslationServices.translate(self.tenant_id, RQC_MESSAGE, arguments.language)} "
- f"{TranslationServices.translate(self.tenant_id, CONTACT_DATA_QUESTION, arguments.language)}")
+ next_idx = previous_idx + 1
- self.flow.state.answer = answer
- self.flow.state.phase = "personal_contact_data_preparation"
+ if next_idx < len(ko_questions.ko_questions): # There's still a KO criterium to be evaluated
+ next_ko_criterium = ko_questions.ko_questions[next_idx]
+ ko_form = self._prepare_ko_question_form(ko_questions, next_ko_criterium.title, arguments.language)
+ next_message = random.choice(KO_CRITERIA_NEXT_MESSAGES)
+ answer = TranslationServices.translate(self.tenant_id, next_message, arguments.language)
+ if rag_output:
+ answer = f"{rag_output.answer}\n\n{answer}"
+
+ self.flow.state.answer = answer
+ self.flow.state.form_request = ko_form
+ self.flow.state.current_ko_criterium = next_ko_criterium.title
+ self.flow.state.current_ko_criterium_idx = next_idx
+ self.flow.state.phase = "ko_question_evaluation"
+ else: # All KO Criteria have been met
+ answer = TranslationServices.translate(self.tenant_id, KO_CRITERIA_MET_MESSAGE, arguments.language)
+ rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
+ if rag_output:
+ answer = f"{answer}\n\n{rag_output.answer}"
+ answer = (f"{answer}\n\n"
+ f"{TranslationServices.translate(self.tenant_id, RQC_MESSAGE, arguments.language)} \n\n"
+ f"{TranslationServices.translate(self.tenant_id, CONTACT_DATA_QUESTION, arguments.language)}")
+
+ self.flow.state.answer = answer
+ self.flow.state.current_ko_criterium = ""
+ self.flow.state.current_ko_criterium_idx = None
+ self.flow.state.phase = "personal_contact_data_preparation"
results = SelectionResult.create_for_type(self.type, self.type_version,)
@@ -310,7 +347,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
self.flow.state.form_request = time_pref_form
rqc_info = {
- "ko_criteria_answers": self.flow.state.ko_criteria_answers,
+ "ko_criteria_scores": self.flow.state.ko_criteria_scores,
"personal_contact_data": self.flow.state.personal_contact_data,
}
@@ -336,7 +373,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
self.flow.state.contact_time_prefs = arguments.form_values
rqc_info = {
- "ko_criteria_answers": self.flow.state.ko_criteria_answers,
+ "ko_criteria_scores": self.flow.state.ko_criteria_scores,
"personal_contact_data": self.flow.state.personal_contact_data,
"contact_time_prefs": self.flow.state.contact_time_prefs,
}
@@ -476,8 +513,53 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
ko_questions_asset.object_name)
ko_questions = KOQuestions.from_json(ko_questions_data)
+ # DEBUG CHECKS: Validate the type of ko_questions_data
+ current_app.logger.debug(f"KO Questions raw data type: {type(ko_questions_data)}")
+ current_app.logger.debug(
+ f"KO Questions raw data content: {ko_questions_data[:200] if isinstance(ko_questions_data, str) else 'Not a string'}")
+
+ # DEBUG CHECKS: Validate the type of the result
+ current_app.logger.debug(f"KO Questions result type: {type(ko_questions)}")
+ current_app.logger.debug(f"Is KOQuestions instance: {isinstance(ko_questions, KOQuestions)}")
+ current_app.logger.debug(f"KO Questions model dump: {ko_questions.model_dump()}")
+ current_app.logger.debug(
+ f"Number of ko_questions: {len(ko_questions.ko_questions) if hasattr(ko_questions, 'ko_questions') else 'No ko_questions attribute'}")
+
+ # Extra check: validate each item in the list
+ if hasattr(ko_questions, 'ko_questions') and ko_questions.ko_questions:
+ current_app.logger.debug(f"First question type: {type(ko_questions.ko_questions[0])}")
+ current_app.logger.debug(
+ f"First question is KOQuestion: {isinstance(ko_questions.ko_questions[0], KOQuestion)}")
+ current_app.logger.debug(
+ f"First question data: {ko_questions.ko_questions[0].model_dump() if hasattr(ko_questions.ko_questions[0], 'model_dump') else ko_questions.ko_questions[0]}")
+
return ko_questions
+ def _prepare_ko_question_form(self, ko_questions: KOQuestions, current_ko_criterium: str, language: str) \
+ -> Dict[str, Any]:
+ fields = {}
+ ko_question = ko_questions.get_by_title(current_ko_criterium)
+ fields[ko_question.title] = {
+ "name": ko_question.title,
+ "description": ko_question.title,
+ "context": ko_question.question,
+ "type": "options",
+ "required": True,
+ "allowed_values": [ko_question.answer_positive, ko_question.answer_negative]
+ }
+
+ ko_form = {
+ "type": "KO_CRITERIA_FORM",
+ "version": "1.0.0",
+ "name": f"Starter Question: {current_ko_criterium}",
+ "icon": "verified",
+ "fields": fields,
+ }
+
+ ko_form = TranslationServices.translate_config(self.tenant_id, ko_form, "fields", language)
+
+ return ko_form
+
class SelectionKOCriteriumScore(BaseModel):
criterium: Optional[str] = Field(None, alias="criterium")
@@ -529,7 +611,9 @@ class SelectionFlowState(EveAIFlowState):
"""Flow state for RAG specialist that automatically updates from task outputs"""
input: Optional[SelectionInput] = None
rag_output: Optional[RAGOutput] = None
- ko_criteria_answers: Optional[Dict[str, str]] = None
+ current_ko_criterium: Optional[str] = None
+ current_ko_criterium_idx: Optional[int] = None
+ ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = None
personal_contact_data: Optional[PersonalContactData] = None
contact_time_prefs: Optional[ContactTimePreferences] = None
citations: Optional[List[Dict[str, Any]]] = None
@@ -537,7 +621,7 @@ class SelectionFlowState(EveAIFlowState):
class SelectionResult(SpecialistResult):
rag_output: Optional[RAGOutput] = Field(None, alias="rag_output")
- ko_criteria_answers: Optional[Dict[str, str]] = Field(None, alias="ko_criteria_answers")
+ ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores")
personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data")
contact_time_prefs: Optional[ContactTimePreferences] = None
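
Because the flow state now carries a list of `SelectionKOCriteriumScore` objects, anything persisted through `specialist_results` will round-trip via plain data, which is exactly what the TODO in `crewai_base_specialist.py` is about. A round-trip sketch, with the model redeclared in abbreviated form (the `answer` and `score` field types are assumptions based on how the object is constructed above):

```python
# Self-contained round-trip sketch; the real class lives in the specialist module.
from typing import List, Optional

from pydantic import BaseModel, Field, TypeAdapter


class SelectionKOCriteriumScore(BaseModel):
    criterium: Optional[str] = Field(None, alias="criterium")
    answer: Optional[str] = None   # assumed type
    score: Optional[int] = None    # assumed type; set to 1 or 0 in the flow above


scores = [SelectionKOCriteriumScore(criterium="Shift work",
                                    answer="Yes, rotating shifts fit my personal schedule well.",
                                    score=1)]

dumped = [s.model_dump() for s in scores]          # what typically lands in specialist_results
restored = TypeAdapter(List[SelectionKOCriteriumScore]).validate_python(dumped)
assert restored[0].score == 1
```
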