- logging improvement and simplification (no more graylog)
- Traicie Selection Specialist Round Trip - Session improvements + debugging enabled - Tone of Voice & Language Level definitions introduced
@@ -5,7 +5,7 @@ import os

from common.utils.celery_utils import make_celery, init_celery
from common.extensions import db, cache_manager
from config.logging_config import LOGGING
from config.logging_config import configure_logging
from config.config import get_config


@@ -22,7 +22,7 @@ def create_app(config_file=None):
        case _:
            app.config.from_object(get_config('dev'))

    logging.config.dictConfig(LOGGING)
    configure_logging()

    app.logger.info('Starting up eveai_chat_workers...')
    register_extensions(app)

@@ -0,0 +1,20 @@
LANGUAGE_LEVEL = [
    {
        "name": "Basic",
        "description": "Short, simple sentences. Minimal jargon. Lots of visual and concrete language.",
        "cefr_level": "A2 - B1",
        "ideal_audience": "Manual laborers, entry-level roles, newcomers with another native language"
    },
    {
        "name": "Standard",
        "description": "Clear spoken language. Well-formulated without difficult words.",
        "cefr_level": "B2",
        "ideal_audience": "Retail, administration, logistics, early-career professionals"
    },
    {
        "name": "Professional",
        "description": "Business language with technical terms where needed. More complex sentence structures.",
        "cefr_level": "C1",
        "ideal_audience": "Management, HR, technical profiles"
    }
]
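# Illustrative sketch (not part of the committed file): the selection specialist
# flattens LANGUAGE_LEVEL into a prompt context string roughly like this (see
# execute_initial_state further down); the helper name is hypothetical.
def format_language_levels(levels=LANGUAGE_LEVEL) -> str:
    return "\n\n".join(
        f"Name: {item['name']}\n"
        f"Description: {item['description']}\n"
        f"CEFR level: {item['cefr_level']}\n"
        f"Ideal Target Audience: {item['ideal_audience']}"
        for item in levels
    )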
@@ -0,0 +1,32 @@
TONE_OF_VOICE = [
    {
        "name": "Professional & Neutral",
        "description": "Business-like, clear, to the point. Focused on facts.",
        "when_to_use": "Corporate jobs, legal roles, formal sectors"
    },
    {
        "name": "Warm & Empathetic",
        "description": "Human, compassionate, reassuring.",
        "when_to_use": "Healthcare, education, HR, social professions"
    },
    {
        "name": "Energetic & Enthusiastic",
        "description": "Upbeat, persuasive, motivating.",
        "when_to_use": "Sales, marketing, hospitality, start-ups"
    },
    {
        "name": "Accessible & Informal",
        "description": "Casual, approachable, friendly, and human.",
        "when_to_use": "Youth-focused, entry-level, retail, creative sectors"
    },
    {
        "name": "Expert & Trustworthy",
        "description": "Calm authority, advisory tone, knowledgeable.",
        "when_to_use": "IT, engineering, consultancy, medical profiles"
    },
    {
        "name": "No-nonsense & Goal-driven",
        "description": "Direct, efficient, pragmatic.",
        "when_to_use": "Technical, logistics, blue-collar jobs, production environments"
    }
]
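# Illustrative sketch (not part of the committed file): a tone is referenced by name
# in the specialist configuration, e.g. configuration.get('tone_of_voice',
# 'Professional & Neutral'); a lookup with that same fallback could look like this.
# The helper name is hypothetical.
def get_tone_of_voice(name: str) -> dict:
    default = next(t for t in TONE_OF_VOICE if t["name"] == "Professional & Neutral")
    return next((t for t in TONE_OF_VOICE if t["name"] == name), default)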
@@ -0,0 +1,15 @@
from typing import List, Optional
from pydantic import BaseModel, Field
from eveai_chat_workers.outputs.globals.basic_types.list_item import ListItem

class KOQuestion(BaseModel):
    title: str = Field(..., description="The title of the knockout criterion.")
    question: str = Field(..., description="The corresponding question asked to the candidate.")
    answer_positive: Optional[str] = Field(None, description="The answer to the question resulting in a positive outcome.")
    answer_negative: Optional[str] = Field(None, description="The answer to the question resulting in a negative outcome.")

class KOQuestions(BaseModel):
    ko_questions: List[KOQuestion] = Field(
        default_factory=list,
        description="KO Questions and answers."
    )
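# Illustrative sketch (not part of the committed file): KOQuestions is used as the
# output_pydantic schema of the KO interview definition task; the selection
# specialist later reads task.output.pydantic.ko_questions and turns each
# question's answers into form options, roughly like this (sample data hypothetical).
example = KOQuestions(ko_questions=[
    KOQuestion(
        title="Driving licence",
        question="Do you hold a valid B licence?",
        answer_positive="Yes",
        answer_negative="No",
    )
])
allowed = [(q.title, [q.answer_positive, q.answer_negative]) for q in example.ko_questions]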
@@ -4,7 +4,8 @@ from typing import Dict, Any, List
from flask import current_app

from common.extensions import cache_manager
from common.models.interaction import SpecialistRetriever
from common.models.interaction import SpecialistRetriever, Specialist
from common.models.user import Tenant
from common.utils.execution_progress import ExecutionProgressTracker
from config.logging_config import TuningLogger
from eveai_chat_workers.retrievers.base import BaseRetriever
@@ -17,7 +18,9 @@ class BaseSpecialistExecutor(ABC):

    def __init__(self, tenant_id: int, specialist_id: int, session_id: str, task_id: str):
        self.tenant_id = tenant_id
        self.tenant = Tenant.query.get_or_404(tenant_id)
        self.specialist_id = specialist_id
        self.specialist = Specialist.query.get_or_404(specialist_id)
        self.session_id = session_id
        self.task_id = task_id
        self.tuning = False
@@ -96,6 +99,37 @@ class BaseSpecialistExecutor(ABC):
    def update_progress(self, processing_type, data) -> None:
        self.ept.send_update(self.task_id, processing_type, data)

    def _replace_system_variables(self, text: str) -> str:
        """
        Replace all system variables in the text with their corresponding values.
        System variables are in the format 'tenant_<attribute_name>'

        Args:
            text: The text containing system variables to replace

        Returns:
            str: The text with all system variables replaced
        """
        if not text:
            return text

        from common.utils.model_utils import replace_variable_in_template

        # Find all tenant_* variables and replace them with tenant attribute values
        # Format of variables: tenant_name, tenant_code, etc.
        result = text

        # Get all attributes of the tenant object
        tenant_attrs = vars(self.tenant)

        # Replace all tenant_* variables
        for attr_name, attr_value in tenant_attrs.items():
            variable = f"tenant_{attr_name}"
            if variable in result:
                result = replace_variable_in_template(result, variable, str(attr_value))

        return result

    @abstractmethod
    def execute_specialist(self, arguments: SpecialistArguments) -> SpecialistResult:
        """Execute the specialist's logic"""

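# Illustrative note (not part of the committed file): given a tenant whose `name`
# attribute is "Acme" (hypothetical sample data), _replace_system_variables would
# rewrite any occurrence of the system variable "tenant_name" in a prompt text,
# e.g. executor._replace_system_variables("Recruiting for tenant_name") -> "Recruiting for Acme",
# assuming replace_variable_in_template substitutes the matched variable token.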
@@ -33,10 +33,6 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
    def __init__(self, tenant_id: int, specialist_id: int, session_id: str, task_id):
        super().__init__(tenant_id, specialist_id, session_id, task_id)

        # Check and load the specialist
        self.specialist = Specialist.query.get_or_404(specialist_id)
        # Set the specific configuration for the SPIN Specialist
        # self.specialist_configuration = json.loads(self.specialist.configuration)
        self.tuning = self.specialist.tuning
        # Initialize retrievers
        self.retrievers = self._initialize_retrievers()
@@ -127,7 +123,9 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
        for agent in self.specialist.agents:
            agent_config = cache_manager.agents_config_cache.get_config(agent.type, agent.type_version)
            agent_role = agent_config.get('role', '').replace('{custom_role}', agent.role or '')
            agent_role = self._replace_system_variables(agent_role)
            agent_goal = agent_config.get('goal', '').replace('{custom_goal}', agent.goal or '')
            agent_goal = self._replace_system_variables(agent_goal)
            agent_backstory = agent_config.get('backstory', '').replace('{custom_backstory}', agent.backstory or '')
            agent_full_model_name = agent_config.get('full_model_name', 'mistral.mistral-large-latest')
            agent_temperature = agent_config.get('temperature', 0.3)
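            # The _replace_system_variables calls above substitute tenant_* system
            # variables in the agent role and goal after the {custom_*} placeholders
            # from the agent record have been merged in.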
@@ -152,6 +150,7 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
            task_config = cache_manager.tasks_config_cache.get_config(task.type, task.type_version)
            task_description = (task_config.get('task_description', '')
                                .replace('{custom_description}', task.task_description or ''))
            task_description = self._replace_system_variables(task_description)
            task_expected_output = (task_config.get('expected_output', '')
                                    .replace('{custom_expected_output}', task.expected_output or ''))
            # dynamically build the arguments
@@ -161,9 +160,12 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
                "verbose": task.tuning
            }
            task_name = task.type.lower()
            current_app.logger.debug(f"Task {task_name} is getting processed")
            if task_name in self._task_pydantic_outputs:
                task_kwargs["output_pydantic"] = self._task_pydantic_outputs[task_name]
                current_app.logger.debug(f"Task {task_name} has an output pydantic: {self._task_pydantic_outputs[task_name]}")
            if task_name in self._task_agents:
                current_app.logger.debug(f"Task {task_name} has an agent: {self._task_agents[task_name]}")
                task_kwargs["agent"] = self._agents[self._task_agents[task_name]]

            # Instantiate the task with dynamic arguments

@@ -18,6 +18,9 @@ from eveai_chat_workers.outputs.traicie.competencies.competencies_v1_1 import Co
from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew, EveAICrewAIFlow, EveAIFlowState
from common.services.interaction.specialist_services import SpecialistServices

NEW_SPECIALIST_TYPE = "TRAICIE_SELECTION_SPECIALIST"
NEW_SPECIALIST_TYPE_VERSION = "1.3"


class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
    """
@@ -117,8 +120,8 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
        new_specialist = Specialist(
            name=name,
            description=f"Specialist for {arguments.role_name} role",
            type="TRAICIE_SELECTION_SPECIALIST",
            type_version="1.1",
            type=NEW_SPECIALIST_TYPE,
            type_version=NEW_SPECIALIST_TYPE_VERSION,
            tuning=False,
            configuration=selection_config,
        )
@@ -130,7 +133,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
            current_app.logger.error(f"Error creating selection specialist: {str(e)}")
            raise e

        SpecialistServices.initialize_specialist(new_specialist.id, "TRAICIE_SELECTION_SPECIALIST", "1.0")
        SpecialistServices.initialize_specialist(new_specialist.id, NEW_SPECIALIST_TYPE, NEW_SPECIALIST_TYPE_VERSION)


@@ -0,0 +1,273 @@
import asyncio
import json
from os import wait
from typing import Optional, List, Dict, Any
from datetime import date
from time import sleep
from crewai.flow.flow import start, listen, and_
from flask import current_app
from pydantic import BaseModel, Field, EmailStr
from sqlalchemy.exc import SQLAlchemyError

from common.extensions import db
from common.models.user import Tenant
from common.models.interaction import Specialist
from eveai_chat_workers.outputs.globals.basic_types.list_item import ListItem
from eveai_chat_workers.outputs.traicie.knockout_questions.knockout_questions_v1_0 import KOQuestions, KOQuestion
from eveai_chat_workers.specialists.crewai_base_specialist import CrewAIBaseSpecialistExecutor
from eveai_chat_workers.specialists.specialist_typing import SpecialistResult, SpecialistArguments
from eveai_chat_workers.outputs.traicie.competencies.competencies_v1_1 import Competencies
from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew, EveAICrewAIFlow, EveAIFlowState
from common.services.interaction.specialist_services import SpecialistServices
from common.extensions import cache_manager
from eveai_chat_workers.definitions.language_level.language_level_v1_0 import LANGUAGE_LEVEL
from eveai_chat_workers.definitions.tone_of_voice.tone_of_voice_v1_0 import TONE_OF_VOICE


class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
    """
    type: TRAICIE_SELECTION_SPECIALIST
    type_version: 1.3
    Traicie Selection Specialist Executor class
    """

    def __init__(self, tenant_id, specialist_id, session_id, task_id, **kwargs):
        self.role_definition_crew = None

        super().__init__(tenant_id, specialist_id, session_id, task_id)

        # Load the Tenant
        self.tenant = Tenant.query.get_or_404(tenant_id)

    @property
    def type(self) -> str:
        return "TRAICIE_SELECTION_SPECIALIST"

    @property
    def type_version(self) -> str:
        return "1.3"

    def _config_task_agents(self):
        self._add_task_agent("traicie_ko_criteria_interview_definition_task", "traicie_recruiter_agent")

    def _config_pydantic_outputs(self):
        self._add_pydantic_output("traicie_ko_criteria_interview_definition_task", KOQuestions, "ko_questions")

    def _instantiate_specialist(self):
        verbose = self.tuning

        ko_def_agents = [self.traicie_recruiter_agent]
        ko_def_tasks = [self.traicie_ko_criteria_interview_definition_task]
        self.ko_def_crew = EveAICrewAICrew(
            self,
            "KO Criteria Interview Definition Crew",
            agents=ko_def_agents,
            tasks=ko_def_tasks,
            verbose=verbose,
        )

        self.flow = SelectionFlow(
            self,
            self.ko_def_crew
        )

    def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
        self.log_tuning("Traicie Selection Specialist execution started", {})

        current_app.logger.debug(f"Arguments: {arguments.model_dump()}")
        current_app.logger.debug(f"Formatted Context: {formatted_context}")
        current_app.logger.debug(f"Formatted History: {self._formatted_history}")
        current_app.logger.debug(f"Cached Chat Session: {self._cached_session}")

        if not self._cached_session.interactions:
            specialist_phase = "initial"
        else:
            specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')

        results = None

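        # Phase progression stored in each interaction's specialist_results:
        # initial -> ko_questions -> personal_contact_data -> candidate_selected.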
        match specialist_phase:
            case "initial":
                results = self.execute_initial_state(arguments, formatted_context, citations)
            case "ko_questions":
                contact_form = cache_manager.specialist_forms_config_cache.get_config("PERSONAL_CONTACT_FORM", "1.0")
                results = SpecialistResult.create_for_type(self.type, self.type_version,
                                                           answer="We hebben de antwoorden op de KO criteria verwerkt. Je bent een geschikte kandidaat. Kan je je contactgegevens doorgeven?",
                                                           form_request=contact_form,
                                                           phase="personal_contact_data")
            case "personal_contact_data":
                results = SpecialistResult.create_for_type(self.type, self.type_version,
                                                           answer="We hebben de contactgegevens verwerkt. We nemen zo snel mogelijk contact met je op.",
                                                           phase="candidate_selected")

        self.log_tuning("Traicie Selection Specialist execution ended", {"Results": results.model_dump() if results else "No info"})

        return results


    def execute_initial_state(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
        self.log_tuning("Traicie Selection Specialist initial_state_execution started", {})

        knockout_competencies = [
            {
                "title": c["title"],
                "description": c["description"]
            }
            for c in self.specialist.configuration.get("competencies", [])
            if c.get("is_knockout") is True
        ]

        # Convert the TONE_OF_VOICE and LANGUAGE_LEVEL lists to strings usable by the LLM
        tone_of_voice_str = "\n\n".join([f"Name: {item['name']}\nDescription: {item['description']}\nWhen to use: {item['when_to_use']}" for item in TONE_OF_VOICE])
        language_level_str = "\n\n".join([f"Name: {item['name']}\nDescription: {item['description']}\nCEFR level: {item['cefr_level']}\nIdeal Target Audience: {item['ideal_audience']}" for item in LANGUAGE_LEVEL])

        flow_inputs = {
            "region": arguments.region,
            "working_schedule": arguments.working_schedule,
            "start_date": arguments.start_date,
            "language": arguments.language,
            "interaction_mode": arguments.interaction_mode,
            "tone_of_voice": self.specialist.configuration.get('tone_of_voice', 'Professional & Neutral'),
            "tone_of_voice_context": tone_of_voice_str,
            "language_level": self.specialist.configuration.get('language_level', 'Standard'),
            "language_level_context": language_level_str,
            "ko_criteria": knockout_competencies,
        }

        flow_results = self.flow.kickoff(inputs=flow_inputs)

        current_app.logger.debug(f"Flow results: {flow_results}")

        current_app.logger.debug(f"Flow state: {self.flow.state}")

        fields = {}
        for ko_question in self.flow.state.ko_criteria_questions:
            fields[ko_question.title] = {
                "name": ko_question.title,
                "description": ko_question.title,
                "context": ko_question.question,
                "type": "options",
                "required": True,
                "allowed_values": [ko_question.answer_positive, ko_question.answer_negative]
            }

        ko_form = {
            "type": "KO_CRITERIA_FORM",
            "version": "1.0.0",
            "name": "Starter Questions",
            "icon": "verified",
            "fields": fields,
        }

        results = SpecialistResult.create_for_type(self.type, self.type_version,
                                                   answer="We starten met een aantal KO Criteria vragen",
                                                   form_request=ko_form,
                                                   phase="ko_questions")

        return results


class SelectionInput(BaseModel):
    region: str = Field(..., alias="region")
    working_schedule: Optional[str] = Field(..., alias="working_schedule")
    start_date: Optional[date] = Field(None, alias="start_date")
    language: Optional[str] = Field(None, alias="language")
    interaction_mode: Optional[str] = Field(None, alias="interaction_mode")
    tone_of_voice: Optional[str] = Field(None, alias="tone_of_voice")
    tone_of_voice_context: Optional[str] = Field(None, alias="tone_of_voice_context")
    language_level: Optional[str] = Field(None, alias="language_level")
    language_level_context: Optional[str] = Field(None, alias="language_level_context")
    ko_criteria: Optional[List[Dict[str, str]]] = Field(None, alias="ko_criteria")
    question: Optional[str] = Field(None, alias="question")
    field_values: Optional[Dict[str, Any]] = Field(None, alias="field_values")


class SelectionKOCriteriumScore(BaseModel):
    criterium: Optional[str] = Field(None, alias="criterium")
    answer: Optional[str] = Field(None, alias="answer")
    score: Optional[int] = Field(None, alias="score")


class SelectionCompetencyScore(BaseModel):
    competency: Optional[str] = Field(None, alias="competency")
    answer: Optional[str] = Field(None, alias="answer")
    score: Optional[int] = Field(None, alias="score")


class PersonalContactData(BaseModel):
    name: str = Field(..., description="Your Name", alias="name")
    email: EmailStr = Field(..., description="Your Email Address", alias="email")
    phone: str = Field(..., description="Your Phone Number", alias="phone")
    address: Optional[str] = Field(None, description="Your Address", alias="address")
    zip: Optional[str] = Field(None, description="Postal Code", alias="zip")
    city: Optional[str] = Field(None, description="City", alias="city")
    country: Optional[str] = Field(None, description="Country", alias="country")
    consent: bool = Field(..., description="Consent", alias="consent")


class SelectionResult(SpecialistResult):
    ko_criteria_questions: Optional[List[ListItem]] = Field(None, alias="ko_criteria_questions")
    ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores")
    competency_questions: Optional[List[ListItem]] = Field(None, alias="competency_questions")
    competency_scores: Optional[List[SelectionCompetencyScore]] = Field(None, alias="competency_scores")
    personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data")


class SelectionFlowState(EveAIFlowState):
    """Flow state for the Traicie Selection specialist that automatically updates from task outputs"""
    input: Optional[SelectionInput] = None
    ko_criteria_questions: Optional[List[KOQuestion]] = Field(None, alias="ko_criteria_questions")
    ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores")
    competency_questions: Optional[List[ListItem]] = Field(None, alias="competency_questions")
    competency_scores: Optional[List[SelectionCompetencyScore]] = Field(None, alias="competency_scores")
    personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data")
    phase: Optional[str] = Field(None, alias="phase")
    interaction_mode: Optional[str] = Field(None, alias="mode")


class SelectionFlow(EveAICrewAIFlow[SelectionFlowState]):
    def __init__(self,
                 specialist_executor: CrewAIBaseSpecialistExecutor,
                 ko_def_crew: EveAICrewAICrew,
                 **kwargs):
        super().__init__(specialist_executor, "Traicie Selection Specialist Flow", **kwargs)
        self.specialist_executor = specialist_executor
        self.ko_def_crew = ko_def_crew
        self.exception_raised = False

    @start()
    def process_inputs(self):
        return ""

    @listen(process_inputs)
    async def execute_ko_def_definition(self):
        inputs = self.state.input.model_dump()
        try:
            current_app.logger.debug("execute_ko_interview_definition")
            crew_output = await self.ko_def_crew.kickoff_async(inputs=inputs)
            # Unfortunately, crew_output only contains the output of the latest task.
            # As we only take the flow state into account downstream, we need to ensure
            # both competencies and criteria are copied to the flow state.
            update = {}
            for task in self.ko_def_crew.tasks:
                current_app.logger.debug(f"Task {task.name} output:\n{task.output}")
                if task.name == "traicie_ko_criteria_interview_definition_task":
                    # update["competencies"] = task.output.pydantic.competencies
                    self.state.ko_criteria_questions = task.output.pydantic.ko_questions
            # crew_output.pydantic = crew_output.pydantic.model_copy(update=update)
            self.state.phase = "personal_contact_data"
            current_app.logger.debug(f"State after execute_ko_def_definition: {self.state}")
            current_app.logger.debug(f"State dump after execute_ko_def_definition: {self.state.model_dump()}")
            return crew_output
        except Exception as e:
            current_app.logger.error(f"CREW execute_ko_def Kickoff Error: {str(e)}")
            self.exception_raised = True
            raise e

    async def kickoff_async(self, inputs=None):
        current_app.logger.debug(f"Async kickoff {self.name}")
        current_app.logger.debug(f"Inputs: {inputs}")
        self.state.input = SelectionInput.model_validate(inputs)
        current_app.logger.debug(f"State: {self.state}")
        result = await super().kickoff_async(inputs)
        return self.state