- Fixed error where Catalog Types other than the default could not be added

- Fixed error in TRAICIE_KO_INTERVIEW_DEFINITION_SPECIALIST
- Minor improvements
This commit is contained in:
Josako
2025-07-25 22:35:08 +02:00
parent ba523a95c5
commit 42ffe3795f
13 changed files with 258 additions and 102 deletions

View File

@@ -152,6 +152,7 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
agent_backstory = agent_config.get('backstory', '').replace('{custom_backstory}', agent.backstory or '')
agent_backstory = self._replace_system_variables(agent_backstory)
agent_full_model_name = agent_config.get('full_model_name', 'mistral.mistral-large-latest')
current_app.logger.debug(f"Full model name for {agent.type}: {agent_full_model_name}")
agent_temperature = agent_config.get('temperature', 0.3)
llm = get_crewai_llm(agent_full_model_name, agent_temperature)
if not llm:
@@ -331,6 +332,7 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
for state_name, result_name in self._state_result_relations.items():
if result_name in last_interaction.specialist_results:
setattr(self.flow.state, state_name, last_interaction.specialist_results[result_name])
# TODO: A dict or JSON is returned here every time, not a pydantic model?
# Initialize the standard state values
self.flow.state.answer = None

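The TODO above asks whether specialist_results hands back plain dicts or JSON strings rather than pydantic models. Purely as an illustration (none of this is part of the commit), one defensive option is to validate the stored value against the annotation of the target state field before assigning it. The ExampleScore/ExampleState classes and the coerce_to_field_type helper below are hypothetical stand-ins, assuming pydantic v2.

from typing import Any, List, Optional

from pydantic import BaseModel, TypeAdapter


class ExampleScore(BaseModel):
    # Simplified stand-in for a result model such as SelectionKOCriteriumScore.
    criterium: Optional[str] = None
    score: Optional[int] = None


class ExampleState(BaseModel):
    # Simplified stand-in for a flow state holding typed specialist results.
    ko_criteria_scores: Optional[List[ExampleScore]] = None


def coerce_to_field_type(state: BaseModel, field_name: str, raw: Any) -> Any:
    """Validate raw data (model, dict, or JSON string) against the field's annotation."""
    annotation = type(state).model_fields[field_name].annotation
    adapter = TypeAdapter(annotation)
    if isinstance(raw, (str, bytes)):
        return adapter.validate_json(raw)
    return adapter.validate_python(raw)


state = ExampleState()
raw_result = [{"criterium": "Driving licence B", "score": 1}]  # e.g. a dict restored from storage
state.ko_criteria_scores = coerce_to_field_type(state, "ko_criteria_scores", raw_result)
assert isinstance(state.ko_criteria_scores[0], ExampleScore)
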
View File

@@ -1,4 +1,5 @@
import json
import random
from datetime import date
from typing import Optional, List, Dict, Any
@@ -33,6 +34,17 @@ KO_CRITERIA_NOT_MET_MESSAGE = ("Thank you for answering our questions! We proces
"not comply with the minimum requirements for this job. Therefor, we stop this"
"selection procedure")
KO_CRITERIA_MET_MESSAGE = "We processed your answers with a positive result."
KO_CRITERIA_NEXT_MESSAGES = [
"Thank you for your answer. Here's a next question.",
"Your answer fits our needs. We have yet another question to ask you.",
"Positive this far! Here's a follow-up question.",
"Great, thats just what we were hoping for. Lets continue with another question.",
"Appreciate your reply! Here's the next one.",
"Thanks for the input. Lets move on to the next question.",
"Thats exactly what we needed to hear. Here comes the next question.",
"Looks promising! Lets continue with another quick check.",
"Thanks! Here's another point we'd like to clarify."
]
RQC_MESSAGE = "You are well suited for this job."
CONTACT_DATA_QUESTION = ("Are you willing to provide us with your contact data, so we can contact you to continue "
"the selection process?")
@@ -83,10 +95,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
def _config_state_result_relations(self):
self._add_state_result_relation("rag_output")
self._add_state_result_relation("ko_criteria_questions")
self._add_state_result_relation("ko_criteria_answers")
self._add_state_result_relation("competency_questions")
self._add_state_result_relation("competency_scores")
self._add_state_result_relation("ko_criteria_scores")
self._add_state_result_relation("current_ko_criterium")
self._add_state_result_relation("current_ko_criterium_idx")
self._add_state_result_relation("personal_contact_data")
self._add_state_result_relation("contact_time_prefs")
@@ -175,25 +186,10 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
answer = initialisation_message
ko_questions = self._get_ko_questions()
fields = {}
for ko_question in ko_questions.ko_questions:
fields[ko_question.title] = {
"name": ko_question.title,
"description": ko_question.title,
"context": ko_question.question,
"type": "options",
"required": True,
"allowed_values": [ko_question.answer_positive, ko_question.answer_negative]
}
ko_form = {
"type": "KO_CRITERIA_FORM",
"version": "1.0.0",
"name": "Starter Questions",
"icon": "verified",
"fields": fields,
}
ko_form = TranslationServices.translate_config(self.tenant_id, ko_form, "fields", arguments.language)
current_ko_criterium = ko_questions.ko_questions[0].title
current_ko_criterium_idx = 0
ko_form = self._prepare_ko_question_form(ko_questions, current_ko_criterium, arguments.language)
rag_answer = self._check_and_execute_rag(arguments, formatted_context, citations)
if rag_answer:
@@ -202,6 +198,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
else:
answer = rag_answer.answer
self.flow.state.current_ko_criterium = current_ko_criterium
self.flow.state.current_ko_criterium_idx = current_ko_criterium_idx
self.flow.state.ko_criteria_scores = []
self.flow.state.answer = answer
self.flow.state.phase = "ko_question_evaluation"
self.flow.state.form_request = ko_form
@@ -219,21 +218,40 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id,
"No form values returned")
ko_questions = self._get_ko_questions()
# DEBUG CHECKS: Validate the type of the result
current_app.logger.debug(f"KO Questions result type: {type(ko_questions)}")
current_app.logger.debug(f"Is KOQuestions instance: {isinstance(ko_questions, KOQuestions)}")
current_app.logger.debug(f"KO Questions model dump: {ko_questions.model_dump()}")
current_app.logger.debug(
f"Number of ko_questions: {len(ko_questions.ko_questions) if hasattr(ko_questions, 'ko_questions') else 'No ko_questions attribute'}")
# Extra check: validate each item in the list
if hasattr(ko_questions, 'ko_questions') and ko_questions.ko_questions:
current_app.logger.debug(f"First question type: {type(ko_questions.ko_questions[0])}")
current_app.logger.debug(
f"First question is KOQuestion: {isinstance(ko_questions.ko_questions[0], KOQuestion)}")
current_app.logger.debug(
f"First question data: {ko_questions.ko_questions[0].model_dump() if hasattr(ko_questions.ko_questions[0], 'model_dump') else ko_questions.ko_questions[0]}")
previous_idx = self.flow.state.current_ko_criterium_idx
# Load the previous KO Questions
previous_ko_questions = self._get_ko_questions().ko_questions
previous_ko_question = ko_questions.ko_questions[previous_idx]
# Evaluate KO Criteria
evaluation = "positive"
for criterium, answer in arguments.form_values.items():
for qa in previous_ko_questions:
if qa.title == criterium:
if TranslationServices.translate(self.tenant_id, qa.answer_positive, arguments.language) != answer:
evaluation = "negative"
break
if evaluation == "negative":
break
criterium, answer = next(iter(arguments.form_values.items()))
if TranslationServices.translate(self.tenant_id, previous_ko_question.answer_positive, arguments.language) != answer:
evaluation = "negative"
self.flow.state.ko_criteria_answers = arguments.form_values
score = SelectionKOCriteriumScore(
criterium=criterium,
answer=answer,
score=1 if evaluation == "positive" else 0,
)
self.flow.state.ko_criteria_scores.append(score)
if evaluation == "negative":
answer = TranslationServices.translate(self.tenant_id, KO_CRITERIA_NOT_MET_MESSAGE, arguments.language)
@@ -243,16 +261,35 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
results = SelectionResult.create_for_type(self.type, self.type_version)
else:
answer = TranslationServices.translate(self.tenant_id, KO_CRITERIA_MET_MESSAGE, arguments.language)
rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
if rag_output:
answer = f"{answer}\n\n{rag_output.answer}"
answer = (f"{answer}\n\n"
f"{TranslationServices.translate(self.tenant_id, RQC_MESSAGE, arguments.language)} "
f"{TranslationServices.translate(self.tenant_id, CONTACT_DATA_QUESTION, arguments.language)}")
next_idx = previous_idx + 1
self.flow.state.answer = answer
self.flow.state.phase = "personal_contact_data_preparation"
if next_idx < len(ko_questions.ko_questions): # There's still a KO criterium to be evaluated
next_ko_criterium = ko_questions.ko_questions[next_idx]
ko_form = self._prepare_ko_question_form(ko_questions, next_ko_criterium.title, arguments.language)
next_message = random.choice(KO_CRITERIA_NEXT_MESSAGES)
answer = TranslationServices.translate(self.tenant_id, next_message, arguments.language)
if rag_output:
answer = f"{rag_output.answer}\n\n{answer}"
self.flow.state.answer = answer
self.flow.state.form_request = ko_form
self.flow.state.current_ko_criterium = next_ko_criterium.title
self.flow.state.current_ko_criterium_idx = next_idx
self.flow.state.phase = "ko_question_evaluation"
else: # All KO Criteria have been met
answer = TranslationServices.translate(self.tenant_id, KO_CRITERIA_MET_MESSAGE, arguments.language)
rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
if rag_output:
answer = f"{answer}\n\n{rag_output.answer}"
answer = (f"{answer}\n\n"
f"{TranslationServices.translate(self.tenant_id, RQC_MESSAGE, arguments.language)} \n\n"
f"{TranslationServices.translate(self.tenant_id, CONTACT_DATA_QUESTION, arguments.language)}")
self.flow.state.answer = answer
self.flow.state.current_ko_criterium = ""
self.flow.state.current_ko_criterium_idx = None
self.flow.state.phase = "personal_contact_data_preparation"
results = SelectionResult.create_for_type(self.type, self.type_version,)
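
For orientation, the new control flow can be condensed as follows: one KO criterium is asked per turn, each submitted answer produces a score entry, and the flow either serves the next question (with a randomly chosen transition message) or concludes. The stubs below are a simplified, hypothetical sketch of that loop, not the project's KOQuestion model or executor.

import random
from dataclasses import dataclass, field
from typing import List


@dataclass
class KOQuestionStub:
    # Hypothetical stand-in for the real KOQuestion model.
    title: str
    question: str
    answer_positive: str = "Yes"
    answer_negative: str = "No"


@dataclass
class KOFlowStub:
    # Hypothetical stand-in for the flow state driving the KO phase.
    questions: List[KOQuestionStub]
    idx: int = 0
    scores: List[dict] = field(default_factory=list)
    phase: str = "ko_question_evaluation"

    NEXT_MESSAGES = ["Thank you for your answer. Here's a next question.",
                     "Thanks for the input. Let's move on to the next question."]

    def submit_answer(self, answer: str) -> str:
        current = self.questions[self.idx]
        positive = answer == current.answer_positive
        self.scores.append({"criterium": current.title, "answer": answer,
                            "score": 1 if positive else 0})
        if not positive:
            self.phase = "finalisation"  # hypothetical phase name for the negative branch
            return "KO criteria not met."
        self.idx += 1
        if self.idx < len(self.questions):
            return f"{random.choice(self.NEXT_MESSAGES)} {self.questions[self.idx].question}"
        self.phase = "personal_contact_data_preparation"
        return "All KO criteria met."


flow = KOFlowStub([KOQuestionStub("Driving licence", "Do you hold a category B driving licence?"),
                   KOQuestionStub("Shift work", "Are you able to work in shifts?")])
print(flow.submit_answer("Yes"))  # scores the first criterium and serves the second question
print(flow.submit_answer("Yes"))  # all criteria met -> move on to contact data
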
@@ -310,7 +347,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
self.flow.state.form_request = time_pref_form
rqc_info = {
"ko_criteria_answers": self.flow.state.ko_criteria_answers,
"ko_criteria_scores": self.flow.state.ko_criteria_scores,
"personal_contact_data": self.flow.state.personal_contact_data,
}
@@ -336,7 +373,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
self.flow.state.contact_time_prefs = arguments.form_values
rqc_info = {
"ko_criteria_answers": self.flow.state.ko_criteria_answers,
"ko_criteria_scores": self.flow.state.ko_criteria_scores,
"personal_contact_data": self.flow.state.personal_contact_data,
"contact_time_prefs": self.flow.state.contact_time_prefs,
}
@@ -476,8 +513,53 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
ko_questions_asset.object_name)
ko_questions = KOQuestions.from_json(ko_questions_data)
# DEBUG CHECKS: Validate the type of ko_questions_data
current_app.logger.debug(f"KO Questions raw data type: {type(ko_questions_data)}")
current_app.logger.debug(
f"KO Questions raw data content: {ko_questions_data[:200] if isinstance(ko_questions_data, str) else 'Not a string'}")
# DEBUG CHECKS: Validate the type of the result
current_app.logger.debug(f"KO Questions result type: {type(ko_questions)}")
current_app.logger.debug(f"Is KOQuestions instance: {isinstance(ko_questions, KOQuestions)}")
current_app.logger.debug(f"KO Questions model dump: {ko_questions.model_dump()}")
current_app.logger.debug(
f"Number of ko_questions: {len(ko_questions.ko_questions) if hasattr(ko_questions, 'ko_questions') else 'No ko_questions attribute'}")
# Extra check: validate each item in the list
if hasattr(ko_questions, 'ko_questions') and ko_questions.ko_questions:
current_app.logger.debug(f"First question type: {type(ko_questions.ko_questions[0])}")
current_app.logger.debug(
f"First question is KOQuestion: {isinstance(ko_questions.ko_questions[0], KOQuestion)}")
current_app.logger.debug(
f"First question data: {ko_questions.ko_questions[0].model_dump() if hasattr(ko_questions.ko_questions[0], 'model_dump') else ko_questions.ko_questions[0]}")
return ko_questions
def _prepare_ko_question_form(self, ko_questions: KOQuestions, current_ko_criterium: str, language: str) \
-> Dict[str, Any]:
fields = {}
ko_question = ko_questions.get_by_title(current_ko_criterium)
fields[ko_question.title] = {
"name": ko_question.title,
"description": ko_question.title,
"context": ko_question.question,
"type": "options",
"required": True,
"allowed_values": [ko_question.answer_positive, ko_question.answer_negative]
}
ko_form = {
"type": "KO_CRITERIA_FORM",
"version": "1.0.0",
"name": f"Starter Question: {current_ko_criterium}",
"icon": "verified",
"fields": fields,
}
ko_form = TranslationServices.translate_config(self.tenant_id, ko_form, "fields", language)
return ko_form
class SelectionKOCriteriumScore(BaseModel):
criterium: Optional[str] = Field(None, alias="criterium")
@@ -529,7 +611,9 @@ class SelectionFlowState(EveAIFlowState):
"""Flow state for RAG specialist that automatically updates from task outputs"""
input: Optional[SelectionInput] = None
rag_output: Optional[RAGOutput] = None
ko_criteria_answers: Optional[Dict[str, str]] = None
current_ko_criterium: Optional[str] = None
current_ko_criterium_idx: Optional[int] = None
ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = None
personal_contact_data: Optional[PersonalContactData] = None
contact_time_prefs: Optional[ContactTimePreferences] = None
citations: Optional[List[Dict[str, Any]]] = None
@@ -537,7 +621,7 @@ class SelectionFlowState(EveAIFlowState):
class SelectionResult(SpecialistResult):
rag_output: Optional[RAGOutput] = Field(None, alias="rag_output")
ko_criteria_answers: Optional[Dict[str, str]] = Field(None, alias="ko_criteria_answers")
ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores")
personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data")
contact_time_prefs: Optional[ContactTimePreferences] = None
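
A small illustration of how the new scores land in a result payload under the declared aliases; ExampleScore and ExampleResult below are simplified stand-ins for SelectionKOCriteriumScore and SelectionResult, assuming pydantic v2 semantics.

from typing import List, Optional

from pydantic import BaseModel, Field


class ExampleScore(BaseModel):
    # Simplified stand-in for SelectionKOCriteriumScore.
    criterium: Optional[str] = Field(None, alias="criterium")
    answer: Optional[str] = Field(None, alias="answer")
    score: Optional[int] = Field(None, alias="score")


class ExampleResult(BaseModel):
    # Simplified stand-in for SelectionResult; only the scores field is shown.
    ko_criteria_scores: Optional[List[ExampleScore]] = Field(None, alias="ko_criteria_scores")


result = ExampleResult(ko_criteria_scores=[ExampleScore(criterium="Driving licence", answer="Yes", score=1)])
print(result.model_dump(by_alias=True))
# {'ko_criteria_scores': [{'criterium': 'Driving licence', 'answer': 'Yes', 'score': 1}]}
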