- RAG Specialist: fully implemented in the new style
- Selection Specialist (VA version): fully implemented
- TRAICIE_ROLE_DEFINITION_SPECIALIST corrected and adapted to the new style
- 'debug' statements removed
@@ -44,7 +44,6 @@ class TrackedMistralAIEmbeddings(EveAIEmbeddings):
         for i in range(0, len(texts), self.batch_size):
             batch = texts[i:i + self.batch_size]
             batch_num = i // self.batch_size + 1
-            current_app.logger.debug(f"Processing embedding batch {batch_num}, size: {len(batch)}")

             start_time = time.time()
             try:
@@ -70,9 +69,6 @@ class TrackedMistralAIEmbeddings(EveAIEmbeddings):
                 }
                 current_event.log_llm_metrics(metrics)

-                current_app.logger.debug(f"Batch {batch_num} processed: {len(batch)} texts, "
-                                         f"{result.usage.total_tokens} tokens, {batch_time:.2f}s")
-
                 # If processing multiple batches, add a small delay to avoid rate limits
                 if len(texts) > self.batch_size and i + self.batch_size < len(texts):
                     time.sleep(0.25)  # 250ms pause between batches
@@ -82,7 +78,6 @@ class TrackedMistralAIEmbeddings(EveAIEmbeddings):
                 # If a batch fails, try to process each text individually
                 for j, text in enumerate(batch):
                     try:
-                        current_app.logger.debug(f"Attempting individual embedding for item {i + j}")
                         single_start_time = time.time()
                         single_result = self.client.embeddings.create(
                             model=self.model,
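Note: the batching pattern these hunks trim can be sketched as follows. This is an illustrative stand-in, not the project's actual TrackedMistralAIEmbeddings class, and the client call shape (inputs=..., result.data[i].embedding) is an assumption about the embeddings SDK:

import time

def embed_in_batches(client, model, texts, batch_size=32):
    embeddings = []
    for i in range(0, len(texts), batch_size):
        batch = texts[i:i + batch_size]
        try:
            # One API call per batch
            result = client.embeddings.create(model=model, inputs=batch)
            embeddings.extend(d.embedding for d in result.data)
        except Exception:
            # If a batch fails, try to process each text individually
            for text in batch:
                single = client.embeddings.create(model=model, inputs=[text])
                embeddings.extend(d.embedding for d in single.data)
        # If processing multiple batches, pause briefly to avoid rate limits
        if len(texts) > batch_size and i + batch_size < len(texts):
            time.sleep(0.25)  # 250ms pause between batches
    return embeddings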
@@ -18,8 +18,10 @@ class HumanAnswerServices:

     @staticmethod
     def check_additional_information(tenant_id: int, question: str, answer: str, language_iso: str) -> bool:
-        return HumanAnswerServices._check_answer(tenant_id, question, answer, language_iso,
+        result = HumanAnswerServices._check_answer(tenant_id, question, answer, language_iso,
                                                  "check_additional_information", "Check Additional Information")

+        return result
+
     @staticmethod
     def get_answer_to_question(tenant_id: int, question: str, answer: str, language_iso: str) -> str:
@@ -66,7 +68,6 @@ class HumanAnswerServices:
         chain = (setup | check_answer_prompt | structured_llm )

         raw_answer = chain.invoke(prompt_params)
-        current_app.logger.debug(f"Raw answer: {raw_answer}")

         return raw_answer.answer

@@ -89,7 +90,6 @@ class HumanAnswerServices:
         chain = (setup | check_answer_prompt | structured_llm)

         raw_answer = chain.invoke(prompt_params)
-        current_app.logger.debug(f"Raw answer: {raw_answer}")

         return raw_answer.answer

common/utils/cache/translation_cache.py (vendored, 6 changes)

@@ -68,7 +68,6 @@ class TranslationCacheHandler(CacheHandler[TranslationCache]):

                 setattr(translation, column.name, value)

-            current_app.logger.debug(f"Translation Cache Retrieved: {translation}")
             metrics = {
                 'total_tokens': translation.prompt_tokens + translation.completion_tokens,
                 'prompt_tokens': translation.prompt_tokens,
@@ -109,7 +108,6 @@ class TranslationCacheHandler(CacheHandler[TranslationCache]):
         """
         if not context:
             context = 'No context provided.'
-        current_app.logger.debug(f"Getting translation for text: {text[:10]}..., target_lang: {target_lang}, source_lang: {source_lang}, context: {context[:10]}...")

        def creator_func(hash_key: str) -> Optional[TranslationCache]:
            # Check if translation already exists in database
@@ -125,8 +123,6 @@ class TranslationCacheHandler(CacheHandler[TranslationCache]):
                    'time_elapsed': 0,
                    'interaction_type': 'LLM'
                }
-                current_app.logger.debug(f"Found existing translation in DB: {existing_translation.cache_key}")
-                current_app.logger.debug(f"Metrics: {metrics}")
                current_event.log_llm_metrics(metrics)
                db.session.commit()
                return existing_translation
@@ -165,7 +161,6 @@ class TranslationCacheHandler(CacheHandler[TranslationCache]):

        # Generate the hash key using your existing method
        hash_key = self._generate_cache_key(text, target_lang, source_lang, context)
-        current_app.logger.debug(f"Generated hash key: {hash_key}")

        # Pass the hash_key to the get method
        return self.get(creator_func, hash_key=hash_key)
@@ -189,7 +184,6 @@ class TranslationCacheHandler(CacheHandler[TranslationCache]):
    def translate_text(self, text_to_translate: str, target_lang: str, source_lang: str = None, context: str = None) \
            -> tuple[str, dict[str, int | float]]:
        target_language = current_app.config['SUPPORTED_LANGUAGE_ISO639_1_LOOKUP'][target_lang]
-        current_app.logger.debug(f"Target language: {target_language}")
        prompt_params = {
            "text_to_translate": text_to_translate,
            "target_language": target_language,
@@ -44,13 +44,11 @@ def get_default_chat_customisation(tenant_customisation=None):
     if isinstance(tenant_customisation, str):
         try:
             tenant_customisation = json.loads(tenant_customisation)
-            current_app.logger.debug(f"Converted JSON string to dict: {tenant_customisation}")
         except json.JSONDecodeError as e:
             current_app.logger.error(f"Error parsing JSON customisation: {e}")
             return default_customisation

     # Update with tenant customization
-    current_app.logger.debug(f"Tenant customisation - in default creation: {tenant_customisation}")
     if tenant_customisation:
         for key, value in tenant_customisation.items():
             if key in customisation:
@@ -6,22 +6,17 @@ from flask import current_app


 def send_email(to_email, to_name, subject, html):
-    current_app.logger.debug(f"Sending email to {to_email} with subject {subject}")
     access_key = current_app.config['SW_EMAIL_ACCESS_KEY']
     secret_key = current_app.config['SW_EMAIL_SECRET_KEY']
     default_project_id = current_app.config['SW_PROJECT']
     default_region = "fr-par"
-    current_app.logger.debug(f"Access Key: {access_key}\nSecret Key: {secret_key}\n"
-                             f"Default Project ID: {default_project_id}\nDefault Region: {default_region}")
     client = Client(
         access_key=access_key,
         secret_key=secret_key,
         default_project_id=default_project_id,
         default_region=default_region
     )
-    current_app.logger.debug(f"Scaleway Client Initialized")
     tem = TemV1Alpha1API(client)
-    current_app.logger.debug(f"Tem Initialized")
     from_ = CreateEmailRequestAddress(email=current_app.config['SW_EMAIL_SENDER'],
                                       name=current_app.config['SW_EMAIL_NAME'])
     to_ = CreateEmailRequestAddress(email=to_email, name=to_name)
@@ -34,7 +29,6 @@ def send_email(to_email, to_name, subject, html):
         html=html,
         project_id=default_project_id,
     )
-    current_app.logger.debug(f"Email sent to {to_email}")


 def html_to_text(html_content):
@@ -98,7 +98,6 @@ def get_pagination_html(pagination, endpoint, **kwargs):
         if page:
             is_active = 'active' if page == pagination.page else ''
             url = url_for(endpoint, page=page, **kwargs)
-            current_app.logger.debug(f"URL for page {page}: {url}")
             html.append(f'<li class="page-item {is_active}"><a class="page-link" href="{url}">{page}</a></li>')
         else:
             html.append('<li class="page-item disabled"><span class="page-link">...</span></li>')
@@ -4,7 +4,8 @@ role: >
   {tenant_name} Spokesperson. {custom_role}
 goal: >
   You get questions by a human correspondent, and give answers based on a given context, taking into account the history
-  of the current conversation. {custom_goal}
+  of the current conversation.
+  {custom_goal}
 backstory: >
   You are the primary contact for {tenant_name}. You are known by {name}, and can be addressed by this name, or you. You are
   a very good communicator, and adapt to the style used by the human asking for information (e.g. formal or informal).
@@ -13,7 +14,7 @@ backstory: >
   language the context provided to you is in. You are participating in a conversation, not writing e.g. an email. Do not
   include a salutation or closing greeting in your answer.
   {custom_backstory}
-full_model_name: "mistral.mistral-small-latest"
+full_model_name: "mistral.mistral-medium-latest"
 temperature: 0.3
 metadata:
   author: "Josako"
@@ -1,13 +1,17 @@
 version: "1.0.0"
 content: >
-  Check if additional information or questions are available in the following answer (answer in between triple
-  backquotes):
+  Check if there are other elements available in the provided text (in between triple $) than answers to the
+  following question (in between triple €):

-  ```{answer}```
+  €€€
+  {question}
+  €€€

-  in addition to answers to the following question (in between triple backquotes):
+  Provided text:

-  ```{question}```
+  $$$
+  {answer}
+  $$$

   Answer with True or False, without additional information.
 llm_model: "mistral.mistral-medium-latest"
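A minimal sketch of how the reworked check prompt renders once the placeholders are filled; plain str.format substitution is an assumption about how the template is applied, and the sample question/answer are made up:

template = (
    "Check if there are other elements available in the provided text (in between triple $) "
    "than answers to the following question (in between triple €):\n\n"
    "€€€\n{question}\n€€€\n\n"
    "Provided text:\n\n"
    "$$$\n{answer}\n$$$\n\n"
    "Answer with True or False, without additional information."
)
print(template.format(question="What are the opening hours?",
                      answer="We open at 9. Would you also like directions?"))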
@@ -4,7 +4,7 @@ content: |
   question is understandable without that history. The conversation is a consequence of questions and context provided
   by the HUMAN, and the AI (you) answering back, in chronological order. The most recent (i.e. last) elements are the
   most important when detailing the question.
-  You answer by stating the detailed question in {language}.
+  You return only the detailed question in {language}, without any additional information.
   History:
   ```{history}```
   Question to be detailed:
@@ -93,7 +93,7 @@ arguments:
     name: "Interaction Mode"
     type: "enum"
     description: "The interaction mode the specialist will start working in."
-    allowed_values: ["orientation", "seduction"]
+    allowed_values: ["orientation", "selection"]
     default: "orientation"
     required: true
 results:
@@ -8,14 +8,14 @@ task_description: >
   Use the following {language} in your communication, and cite the sources used at the end of the full conversation.
   If the question cannot be answered using the given context, answer "I have insufficient information to answer this
   question."
-  Context (in between triple backquotes):
-  ```{context}```
-  History (in between triple backquotes):
-  ```{history}```
-  Question (in between triple backquotes):
-  ```{question}```
+  Context (in between triple $):
+  $$${context}$$$
+  History (in between triple €):
+  €€€{history}€€€
+  Question (in between triple £):
+  £££{question}£££
 expected_output: >
-
+  Your answer.
 metadata:
   author: "Josako"
   date_added: "2025-01-08"
@@ -121,7 +121,6 @@ def view_content(content_type):
         content_type (str): Type of content (e.g. 'changelog', 'terms', 'privacy')
     """
     try:
-        current_app.logger.debug(f"Showing content {content_type}")
         major_minor = request.args.get('version')
         patch = request.args.get('patch')

@@ -163,5 +162,4 @@ def view_content(content_type):
 @roles_accepted('Super User', 'Partner Admin', 'Tenant Admin')
 def release_notes():
     """Redirect to the new content view for the changelog"""
-    current_app.logger.debug(f"Redirecting to content viewer")
     return redirect(prefixed_url_for('basic_bp.view_content', content_type='changelog'))
@@ -137,14 +137,12 @@ class RetrieverForm(FlaskForm):
         super().__init__(*args, **kwargs)
         tenant_id = session.get('tenant').get('id')
         choices = TenantServices.get_available_types_for_tenant(tenant_id, "retrievers")
-        current_app.logger.debug(f"Potential choices: {choices}")
         # Dynamically populate the 'type' field using the constructor
         type_choices = []
         for key, value in choices.items():
             valid_catalog_types = value.get('valid_catalog_types', None)
             if valid_catalog_types:
                 catalog_type = session.get('catalog').get('type')
-                current_app.logger.debug(f"Check {catalog_type} in {valid_catalog_types}")
                 if catalog_type in valid_catalog_types:
                     type_choices.append((key, value['name']))
             else:  # Retriever type is valid for all catalog types
@@ -668,7 +668,6 @@ def handle_document_version_selection():
         return redirect(prefixed_url_for('document_bp.document_versions_list'))

     action = request.form['action']
-    current_app.logger.debug(f'Action: {action}')

     match action:
         case 'edit_document_version':
@@ -747,7 +746,6 @@ def document_versions_list():
 @document_bp.route('/view_document_version_markdown/<int:document_version_id>', methods=['GET'])
 @roles_accepted('Super User', 'Partner Admin', 'Tenant Admin')
 def view_document_version_markdown(document_version_id):
-    current_app.logger.debug(f'Viewing document version markdown {document_version_id}')
     # Retrieve document version
     document_version = DocumentVersion.query.get_or_404(document_version_id)

@@ -759,7 +757,6 @@ def view_document_version_markdown(document_version_id):
     markdown_filename = f"{document_version.id}.md"
     markdown_object_name = minio_client.generate_object_name(document_version.doc_id, document_version.language,
                                                              document_version.id, markdown_filename)
-    current_app.logger.debug(f'Markdown object name: {markdown_object_name}')
     # Download actual markdown file
     file_data = minio_client.download_document_file(
         tenant_id,
@@ -769,7 +766,6 @@ def view_document_version_markdown(document_version_id):

     # Decode the binary data to UTF-8 text
     markdown_content = file_data.decode('utf-8')
-    current_app.logger.debug(f'Markdown content: {markdown_content}')

     # Render the template with the markdown content
     return render_template(
@@ -66,8 +66,6 @@ class OrderedListField(TextAreaField):
         else:
             existing_render_kw = {}

-        current_app.logger.debug(f"incomming render_kw for ordered list field: {existing_render_kw}")
-
         # Compose the new render_kw
         new_render_kw = {
             'data-list-type': list_type,
@@ -91,8 +89,6 @@ class OrderedListField(TextAreaField):
             if key != 'class':  # Classes have already been handled
                 new_render_kw[key] = value

-        current_app.logger.debug(f"final render_kw for ordered list field: {new_render_kw}")
-
         # Update kwargs with the new combined render_kw
         kwargs['render_kw'] = new_render_kw

@@ -327,7 +323,6 @@ class DynamicFormBase(FlaskForm):
             for the collection_name and may also contain list_type definitions
             initial_data: Optional initial data for the fields
         """
-        current_app.logger.debug(f"Adding dynamic fields for collection {collection_name} with config: {config}")

        if isinstance(initial_data, str):
            try:
@@ -535,7 +530,7 @@ class DynamicFormBase(FlaskForm):
                         list_types[list_type] = specialist_config[list_type]
                         break
                 except Exception as e:
-                    current_app.logger.debug(f"Error checking specialist {specialist_type}: {e}")
+                    current_app.logger.error(f"Error checking specialist {specialist_type}: {e}")
                     continue
         except Exception as e:
             current_app.logger.error(f"Error retrieving specialist configurations: {e}")
@@ -575,7 +570,6 @@ class DynamicFormBase(FlaskForm):
             # Parse JSON for special field types
             if field.type == 'BooleanField':
                 data[original_field_name] = full_field_name in self.raw_formdata
-                current_app.logger.debug(f"Value for {original_field_name} is {data[original_field_name]}")
             elif isinstance(field, (TaggingFieldsField, TaggingFieldsFilterField, DynamicArgumentsField, OrderedListField)) and field.data:
                 try:
                     data[original_field_name] = json.loads(field.data)
@@ -76,7 +76,6 @@ def handle_chat_session_selection():
     cs_id = ast.literal_eval(chat_session_identification).get('value')

     action = request.form['action']
-    current_app.logger.debug(f'Handle Chat Session Selection Action: {action}')

     match action:
         case 'view_chat_session':
@@ -503,7 +502,6 @@ def execute_specialist(specialist_id):
     if form.validate_on_submit():
         # We're only interested in gathering the dynamic arguments
         arguments = form.get_dynamic_data("arguments")
-        current_app.logger.debug(f"Executing specialist {specialist.id} with arguments: {arguments}")
         session_id = SpecialistServices.start_session()
         execution_task = SpecialistServices.execute_specialist(
             tenant_id=session.get('tenant').get('id'),
@@ -512,7 +510,6 @@ def execute_specialist(specialist_id):
             session_id=session_id,
             user_timezone=session.get('tenant').get('timezone')
         )
-        current_app.logger.debug(f"Execution task for specialist {specialist.id} created: {execution_task}")
         return redirect(prefixed_url_for('interaction_bp.session_interactions_by_session_id', session_id=session_id))

     return render_template('interaction/execute_specialist.html', form=form)
@@ -620,7 +617,6 @@ def specialist_magic_link():
     # Define the make valid for this magic link
     specialist = Specialist.query.get(new_specialist_magic_link.specialist_id)
     make_id = specialist.configuration.get('make', None)
-    current_app.logger.debug(f"make_id defined in specialist: {make_id}")
     if make_id:
         new_specialist_magic_link.tenant_make_id = make_id
     elif session.get('tenant').get('default_tenant_make_id'):
@@ -707,10 +703,6 @@ def edit_specialist_magic_link(specialist_magic_link_id):

         # Store the data URI in the form data
         form.qr_code_url.data = data_uri
-
-        current_app.logger.debug(f"QR code generated successfully for {magic_link_code}")
-        current_app.logger.debug(f"QR code data URI starts with: {data_uri[:50]}...")
-
     except Exception as e:
         current_app.logger.error(f"Failed to generate QR code: {str(e)}")
         form.qr_code_url.data = "Error generating QR code"
@@ -794,7 +786,6 @@ def assets():
 def handle_asset_selection():
     action = request.form.get('action')
     asset_id = request.form.get('selected_row')
-    current_app.logger.debug(f"Action: {action}, Asset ID: {asset_id}")

     if action == 'edit_asset':
         return redirect(prefixed_url_for('interaction_bp.edit_asset', asset_id=asset_id))
@@ -51,7 +51,6 @@ class AssetsListView(FilteredListView):
         else:
             return ''

-        current_app.logger.debug(f"Assets retrieved: {pagination.items}")
        rows = [
            [
                {'value': item.id, 'class': '', 'type': 'text'},
@@ -11,7 +11,6 @@ class DocumentListView(FilteredListView):

     def get_query(self):
         catalog_id = session.get('catalog_id')
-        current_app.logger.debug(f"Catalog ID: {catalog_id}")
         return Document.query.filter_by(catalog_id=catalog_id)

     def apply_filters(self, query):
@@ -57,7 +56,6 @@ class DocumentListView(FilteredListView):
         else:
             return ''

-        current_app.logger.debug(f"Items retrieved: {pagination.items}")
        rows = [
            [
                {'value': item.id, 'class': '', 'type': 'text'},
@@ -19,7 +19,6 @@ class FullDocumentListView(FilteredListView):

     def get_query(self):
         catalog_id = session.get('catalog_id')
-        current_app.logger.debug(f"Catalog ID: {catalog_id}")

         # Fix: select only the id column in the subquery
         latest_version_subquery = (
@@ -57,7 +57,6 @@ def edit_partner(partner_id):
     form.tenant.data = tenant.name

     if form.validate_on_submit():
-        current_app.logger.debug(f"Form data for Partner: {form.data}")
         # Populate the user with form data
         form.populate_obj(partner)
         update_logging_information(partner, dt.now(tz.utc))
@@ -88,8 +87,6 @@ def partners():
         Tenant.name.label('name')
     ).join(Tenant, Partner.tenant_id == Tenant.id).order_by(Partner.id))

-    current_app.logger.debug(f'{format_query_results(query)}')
-
     pagination = query.paginate(page=page, per_page=per_page)
     the_partners = pagination.items

@@ -170,17 +167,10 @@ def edit_partner_service(partner_service_id):
     form.add_dynamic_fields("configuration", partner_service_config, partner_service.configuration)
     form.add_dynamic_fields("permissions", partner_service_config, partner_service.permissions)

-    if request.method == 'POST':
-        current_app.logger.debug(f"Form returned: {form.data}")
-        raw_form_data = request.form.to_dict()
-        current_app.logger.debug(f"Raw form data: {raw_form_data}")
-
     if form.validate_on_submit():
         form.populate_obj(partner_service)
         partner_service.configuration = form.get_dynamic_data('configuration')
         partner_service.permissions = form.get_dynamic_data('permissions')
-        current_app.logger.debug(f"Partner Service configuration: {partner_service.configuration}")
-        current_app.logger.debug(f"Partner Service permissions: {partner_service.permissions}")

         update_logging_information(partner_service, dt.now(tz.utc))

@@ -172,11 +172,6 @@ def validate_make_name(form, field):
     # Check if tenant_make already exists in the database
     existing_make = TenantMake.query.filter_by(name=field.data).first()

-    if existing_make:
-        current_app.logger.debug(f'Existing make: {existing_make.id}')
-    current_app.logger.debug(f'Form has id: {hasattr(form, 'id')}')
-    if hasattr(form, 'id'):
-        current_app.logger.debug(f'Form has id: {form.id.data}')
     if existing_make:
         if not hasattr(form, 'id') or form.id.data != existing_make.id:
             raise ValidationError(f'A Make with name "{field.data}" already exists. Choose another name.')
@@ -147,12 +147,8 @@ def select_tenant():
     # Start with a base query
     query = Tenant.query

-    current_app.logger.debug("We proberen het scherm op te bouwen")
-    current_app.logger.debug(f"Session: {session}")
-
     # Apply different filters based on user role
     if current_user.has_roles('Partner Admin') and 'partner' in session:
-        current_app.logger.debug("We zitten in partner mode")
         # Get the partner's management service
         management_service = next((service for service in session['partner']['services']
                                    if service.get('type') == 'MANAGEMENT_SERVICE'), None)
@@ -175,7 +171,6 @@ def select_tenant():
         # Filter query to only show allowed tenants
         query = query.filter(Tenant.id.in_(allowed_tenant_ids))

-        current_app.logger.debug("We zitten na partner service selectie")
     # Apply form filters (for both Super User and Partner Admin)
     if filter_form.validate_on_submit():
         if filter_form.types.data:
@@ -722,9 +717,7 @@ def edit_tenant_make(tenant_make_id):
         form.populate_obj(tenant_make)
         tenant_make.chat_customisation_options = form.get_dynamic_data("configuration")
         # Process allowed_languages as an array
-        current_app.logger.debug(f"Allowed languages: {form.allowed_languages.data}")
         tenant_make.allowed_languages = form.allowed_languages.data if form.allowed_languages.data else None
-        current_app.logger.debug(f"Updated allowed languages: {tenant_make.allowed_languages}")

         # Update logging information
         update_logging_information(tenant_make, dt.now(tz.utc))
@@ -4,4 +4,4 @@ from pydantic import BaseModel, Field


 class QAOutput(BaseModel):
-    answer: bool = Field(None, description="True or False")
+    answer: bool = Field(None, description="Your answer, True or False")
@@ -4,6 +4,6 @@ from pydantic import BaseModel, Field


 class RAGOutput(BaseModel):
-    answer: str = Field(None, description="Answer to the questions asked")
+    answer: str = Field(None, description="Answer to the questions asked, in Markdown format.")
     insufficient_info: bool = Field(None, description="An indication if there's insufficient information to answer")

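Both QAOutput and RAGOutput are plain pydantic schemas used as structured LLM outputs, so a response can be validated directly; a minimal sketch, with the dict standing in for whatever the model returns:

from pydantic import BaseModel, Field

class RAGOutput(BaseModel):
    answer: str = Field(None, description="Answer to the questions asked, in Markdown format.")
    insufficient_info: bool = Field(None, description="An indication if there's insufficient information to answer")

raw = {"answer": "**Opening hours:** 9:00-17:00", "insufficient_info": False}
rag_output = RAGOutput.model_validate(raw)
print(rag_output.answer, rag_output.insufficient_info)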
@@ -108,6 +108,5 @@ def get_retriever_class(retriever_type: str, type_version: str):
         module_path = f"eveai_chat_workers.retrievers.{partner}.{retriever_type}.{major_minor}"
     else:
         module_path = f"eveai_chat_workers.retrievers.globals.{retriever_type}.{major_minor}"
-    current_app.logger.debug(f"Importing retriever class from {module_path}")
     module = importlib.import_module(module_path)
     return module.RetrieverExecutor
@@ -116,8 +116,8 @@ class RetrieverExecutor(BaseRetriever):
         ))
         self.log_tuning('retrieve', {
             "arguments": arguments.model_dump(),
-            "similarity_threshold": self.similarity_threshold,
-            "k": self.k,
+            "similarity_threshold": similarity_threshold,
+            "k": k,
             "query": compiled_query,
             "Raw Results": str(results),
             "Processed Results": [r.model_dump() for r in processed_results],
@@ -135,11 +135,9 @@ def get_specialist_class(specialist_type: str, type_version: str):
     major_minor = '_'.join(type_version.split('.')[:2])
     specialist_config = cache_manager.specialists_config_cache.get_config(specialist_type, type_version)
     partner = specialist_config.get("partner", None)
-    current_app.logger.debug(f"Specialist partner for {specialist_type} {type_version} is {partner}")
     if partner:
         module_path = f"eveai_chat_workers.specialists.{partner}.{specialist_type}.{major_minor}"
     else:
         module_path = f"eveai_chat_workers.specialists.globals.{specialist_type}.{major_minor}"
-    current_app.logger.debug(f"Importing specialist class from {module_path}")
     module = importlib.import_module(module_path)
     return module.SpecialistExecutor
@@ -40,7 +40,6 @@ class EveAICrewAIAgent(Agent):
         Returns:
             Output of the agent
         """
-        current_app.logger.debug(f"Task Execution {task.name} by {self.name}")
         # with current_event.create_span(f"Task Execution {task.name} by {self.name}"):
         self.specialist.log_tuning(f"EveAI Agent {self.name}, Task {task.name} Start", {})
         self.specialist.update_progress("EveAI Agent Task Start",
@@ -134,11 +133,17 @@ class EveAICrewAIFlow(Flow):
         return self.state


+class Citation(BaseModel):
+    document_id: int
+    document_version_id: int
+    url: str
+
+
 class EveAIFlowState(BaseModel):
     """Base class for all EveAI flow states"""
     answer: Optional[str] = None
-    detailed_question: Optional[str] = None
     question: Optional[str] = None
     phase: Optional[str] = None
     form_request: Optional[Dict[str, Any]] = None
-    citations: Optional[Dict[str, Any]] = None
+    citations: Optional[List[Citation]] = None

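With the new Citation model the flow state carries typed citations instead of a loose dict; a small usage sketch (field values are made up for illustration):

from typing import Any, Dict, List, Optional
from pydantic import BaseModel

class Citation(BaseModel):
    document_id: int
    document_version_id: int
    url: str

class EveAIFlowState(BaseModel):
    """Base class for all EveAI flow states"""
    answer: Optional[str] = None
    question: Optional[str] = None
    phase: Optional[str] = None
    form_request: Optional[Dict[str, Any]] = None
    citations: Optional[List[Citation]] = None

state = EveAIFlowState(
    answer="See the installation guide.",
    question="How do I install it?",
    phase="rag",
    citations=[Citation(document_id=12, document_version_id=34,
                        url="https://example.com/documents/12")],
)
print(state.model_dump()["citations"][0]["url"])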
@@ -78,14 +78,15 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
         return "\n\n".join([
             "\n\n".join([
                 f"HUMAN:\n"
-                f"{interaction.specialist_results['detailed_question']}"
-                if interaction.specialist_results.get('detailed_question') else "",
+                f"{interaction.specialist_arguments['question']}"
+                if interaction.specialist_arguments.get('question') else "",
                 f"{interaction.specialist_arguments.get('form_values')}"
                 if interaction.specialist_arguments.get('form_values') else "",
                 f"AI:\n{interaction.specialist_results['answer']}"
                 if interaction.specialist_results.get('answer') else ""
             ]).strip()
             for interaction in self._cached_session.interactions
+            if interaction.specialist_arguments.get('question') != "Initialize"
         ])

     def _add_task_agent(self, task_name: str, agent_name: str):
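The history formatter now reads the human turn from specialist_arguments['question'] (instead of the dropped detailed_question result) and skips the synthetic "Initialize" turn. A simplified sketch with hypothetical dict stand-ins for the interaction objects:

interactions = [
    {"specialist_arguments": {"question": "Initialize"}, "specialist_results": {}},
    {"specialist_arguments": {"question": "What are the opening hours?"},
     "specialist_results": {"answer": "We open at 9."}},
]

history = "\n\n".join(
    "\n\n".join(part for part in [
        f"HUMAN:\n{i['specialist_arguments']['question']}"
        if i["specialist_arguments"].get("question") else "",
        f"AI:\n{i['specialist_results']['answer']}"
        if i["specialist_results"].get("answer") else "",
    ] if part).strip()
    for i in interactions
    if i["specialist_arguments"].get("question") != "Initialize"
)
print(history)  # Only the real turn remains: HUMAN question, then AI answer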
@@ -120,10 +121,9 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
         self._state_result_relations[state_name] = result_name

     def _config_default_state_result_relations(self):
-        for default_attribute_name in ['answer', 'detailed_question', 'form_request', 'phase', 'citations']:
+        for default_attribute_name in ['answer', 'form_request', 'phase', 'citations']:
             self._add_state_result_relation(default_attribute_name)

-
     @abstractmethod
     def _config_state_result_relations(self):
         """Configure the state-result relations by adding state-result combinations. Use _add_state_result_relation()"""
@@ -150,6 +150,7 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
         agent_goal = agent_config.get('goal', '').replace('{custom_goal}', agent.goal or '')
         agent_goal = self._replace_system_variables(agent_goal)
         agent_backstory = agent_config.get('backstory', '').replace('{custom_backstory}', agent.backstory or '')
+        agent_backstory = self._replace_system_variables(agent_backstory)
         agent_full_model_name = agent_config.get('full_model_name', 'mistral.mistral-large-latest')
         agent_temperature = agent_config.get('temperature', 0.3)
         llm = get_crewai_llm(agent_full_model_name, agent_temperature)
@@ -183,12 +184,9 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
             "verbose": task.tuning
         }
         task_name = task.type.lower()
-        current_app.logger.debug(f"Task {task_name} is getting processed")
         if task_name in self._task_pydantic_outputs:
             task_kwargs["output_pydantic"] = self._task_pydantic_outputs[task_name]
-            current_app.logger.debug(f"Task {task_name} has an output pydantic: {self._task_pydantic_outputs[task_name]}")
         if task_name in self._task_agents:
-            current_app.logger.debug(f"Task {task_name} has an agent: {self._task_agents[task_name]}")
             task_kwargs["agent"] = self._agents[self._task_agents[task_name]]

         # Instantiate the task with dynamic arguments
@@ -236,46 +234,6 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
         The assets can be retrieved using their type name in lower case, e.g. rag_agent"""
         raise NotImplementedError

-    def _detail_question(self, language: str, question: str) -> str:
-        """Detail question based on conversation history"""
-        try:
-            with current_event.create_span("Specialist Detail Question"):
-                # Get LLM and template
-                template, llm = get_template("history", temperature=0.3)
-                language_template = create_language_template(template, language)
-
-                # Create prompt
-                history_prompt = ChatPromptTemplate.from_template(language_template)
-
-                # Create chain
-                chain = (
-                    history_prompt |
-                    llm |
-                    StrOutputParser()
-                )
-
-                # Execute chain
-                detailed_question = chain.invoke({
-                    "history": self.formatted_history,
-                    "question": question
-                })
-
-                self.log_tuning("_detail_question", {
-                    "cached_session_id": self._cached_session.session_id,
-                    "cached_session.interactions": str(self._cached_session.interactions),
-                    "original_question": question,
-                    "history_used": self.formatted_history,
-                    "detailed_question": detailed_question,
-                })
-
-                self.update_progress("Detail Question", {"name": self.type})
-
-                return detailed_question
-
-        except Exception as e:
-            current_app.logger.error(f"Error detailing question: {e}")
-            return question  # Fallback to original question
-
     def _retrieve_context(self, arguments: SpecialistArguments) -> tuple[str, list[dict[str, Any]]]:
         with current_event.create_span("Specialist Retrieval"):
             self.log_tuning("Starting context retrieval", {
@@ -283,12 +241,8 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
                 "all arguments": arguments.model_dump(),
             })

-            original_question = arguments.question
-            detailed_question = self._detail_question(arguments.language, original_question)
-
             modified_arguments = arguments.model_copy(update={
-                "query": detailed_question,
-                "original_query": original_question
+                "query": arguments.question
             })


@@ -361,11 +315,8 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):

         update_data = {}
         state_dict = self.flow.state.model_dump()
-        current_app.logger.debug(f"Updating specialist results with state: {state_dict}")
         for state_name, result_name in self._state_result_relations.items():
-            current_app.logger.debug(f"Try Updating {result_name} with {state_name}")
             if state_name in state_dict and state_dict[state_name] is not None:
-                current_app.logger.debug(f"Updating {result_name} with {state_name} = {state_dict[state_name]}")
                 update_data[result_name] = state_dict[state_name]

         return specialist_results.model_copy(update=update_data)
@@ -383,35 +334,22 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):

         # Initialize the standard state values
         self.flow.state.answer = None
-        self.flow.state.detailed_question = None
+        self.flow.state.question = None
         self.flow.state.form_request = None
         self.flow.state.phase = None
         self.flow.state.citations = []

     @abstractmethod
-    def execute(self, arguments: SpecialistArguments, formatted_context: str, citations: List[int]) -> SpecialistResult:
+    def execute(self, arguments: SpecialistArguments,
+                formatted_context: Optional[str], citations: Optional[list[dict[str, Any]]]) -> SpecialistResult:
         raise NotImplementedError

     def execute_specialist(self, arguments: SpecialistArguments) -> SpecialistResult:
-        current_app.logger.debug(f"Retrievers for this specialist: {self.retrievers}")
         if self.retrievers:
-            # Detail the incoming query
-            if self._cached_session.interactions:
-                question = arguments.question
-                language = arguments.language
-                detailed_question = self._detail_question(language, question)
-            else:
-                detailed_question = arguments.question
-
-            modified_arguments = {
-                "question": detailed_question,
-                "original_question": arguments.question
-            }
-            detailed_arguments = arguments.model_copy(update=modified_arguments)
-            formatted_context, citations = self._retrieve_context(detailed_arguments)
-            result = self.execute(detailed_arguments, formatted_context, citations)
+            formatted_context = None
+            citations = None
+            result = self.execute(arguments, formatted_context, citations)
         modified_result = {
-            "detailed_question": detailed_question,
             "citations": citations,
         }
         intermediate_result = result.model_copy(update=modified_result)
@@ -69,18 +69,12 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
     def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
         self.log_tuning("RAG Specialist execution started", {})

-        current_app.logger.debug(f"Arguments: {arguments.model_dump()}")
-        current_app.logger.debug(f"Formatted Context: {formatted_context}")
-        current_app.logger.debug(f"Formatted History: {self._formatted_history}")
-        current_app.logger.debug(f"Cached Chat Session: {self._cached_session}")
-
         if not self._cached_session.interactions:
             specialist_phase = "initial"
         else:
             specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')

         results = None
-        current_app.logger.debug(f"Specialist Phase: {specialist_phase}")

         match specialist_phase:
             case "initial":
@@ -191,7 +185,6 @@ class RAGFlow(EveAICrewAIFlow[RAGFlowState]):
             raise e

     async def kickoff_async(self, inputs=None):
-        current_app.logger.debug(f"Async kickoff {self.name}")
         self.state.input = RAGSpecialistInput.model_validate(inputs)
         result = await super().kickoff_async(inputs)
         return self.state
@@ -1,6 +1,6 @@
 import json
 from os import wait
-from typing import Optional, List
+from typing import Optional, List, Dict, Any

 from crewai.flow.flow import start, listen, and_
 from flask import current_app
@@ -47,6 +47,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):

     def _config_state_result_relations(self):
         self._add_state_result_relation("rag_output")
+        self._add_state_result_relation("citations")

     def _instantiate_specialist(self):
         verbose = self.tuning
@@ -69,18 +70,12 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
|
|||||||
def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
|
def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
|
||||||
self.log_tuning("RAG Specialist execution started", {})
|
self.log_tuning("RAG Specialist execution started", {})
|
||||||
|
|
||||||
current_app.logger.debug(f"Arguments: {arguments.model_dump()}")
|
|
||||||
current_app.logger.debug(f"Formatted Context: {formatted_context}")
|
|
||||||
current_app.logger.debug(f"Formatted History: {self._formatted_history}")
|
|
||||||
current_app.logger.debug(f"Cached Chat Session: {self._cached_session}")
|
|
||||||
|
|
||||||
if not self._cached_session.interactions:
|
if not self._cached_session.interactions:
|
||||||
specialist_phase = "initial"
|
specialist_phase = "initial"
|
||||||
else:
|
else:
|
||||||
specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')
|
specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')
|
||||||
|
|
||||||
results = None
|
results = None
|
||||||
current_app.logger.debug(f"Specialist Phase: {specialist_phase}")
|
|
||||||
|
|
||||||
match specialist_phase:
|
match specialist_phase:
|
||||||
case "initial":
|
case "initial":
|
||||||
@@ -112,6 +107,8 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
|
|||||||
INSUFFICIENT_INFORMATION_MESSAGE,
|
INSUFFICIENT_INFORMATION_MESSAGE,
|
||||||
arguments.language)
|
arguments.language)
|
||||||
|
|
||||||
|
formatted_context, citations = self._retrieve_context(arguments)
|
||||||
|
|
||||||
if formatted_context:
|
if formatted_context:
|
||||||
flow_inputs = {
|
flow_inputs = {
|
||||||
"language": arguments.language,
|
"language": arguments.language,
|
||||||
@@ -128,16 +125,18 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
|
|||||||
flow_results.rag_output.answer = insufficient_info_message
|
flow_results.rag_output.answer = insufficient_info_message
|
||||||
|
|
||||||
rag_output = flow_results.rag_output
|
rag_output = flow_results.rag_output
|
||||||
|
|
||||||
else:
|
else:
|
||||||
rag_output = RAGOutput(answer=insufficient_info_message, insufficient_info=True)
|
rag_output = RAGOutput(answer=insufficient_info_message, insufficient_info=True)
|
||||||
|
|
||||||
self.flow.state.rag_output = rag_output
|
self.flow.state.rag_output = rag_output
|
||||||
|
self.flow.state.citations = citations
|
||||||
self.flow.state.answer = rag_output.answer
|
self.flow.state.answer = rag_output.answer
|
||||||
self.flow.state.phase = "rag"
|
self.flow.state.phase = "rag"
|
||||||
|
|
||||||
results = RAGSpecialistResult.create_for_type(self.type, self.type_version)
|
results = RAGSpecialistResult.create_for_type(self.type, self.type_version)
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
class RAGSpecialistInput(BaseModel):
|
class RAGSpecialistInput(BaseModel):
|
||||||
language: Optional[str] = Field(None, alias="language")
|
language: Optional[str] = Field(None, alias="language")
|
||||||
@@ -156,6 +155,7 @@ class RAGFlowState(EveAIFlowState):
|
|||||||
"""Flow state for RAG specialist that automatically updates from task outputs"""
|
"""Flow state for RAG specialist that automatically updates from task outputs"""
|
||||||
input: Optional[RAGSpecialistInput] = None
|
input: Optional[RAGSpecialistInput] = None
|
||||||
rag_output: Optional[RAGOutput] = None
|
rag_output: Optional[RAGOutput] = None
|
||||||
|
citations: Optional[List[Dict[str, Any]]] = None
|
||||||
|
|
||||||
|
|
||||||
class RAGFlow(EveAICrewAIFlow[RAGFlowState]):
|
class RAGFlow(EveAICrewAIFlow[RAGFlowState]):
|
||||||
@@ -190,8 +190,6 @@ class RAGFlow(EveAICrewAIFlow[RAGFlowState]):
             raise e
 
     async def kickoff_async(self, inputs=None):
-        current_app.logger.debug(f"Async kickoff {self.name}")
-        current_app.logger.debug(f"Inputs: {inputs}")
         self.state.input = RAGSpecialistInput.model_validate(inputs)
         result = await super().kickoff_async(inputs)
         return self.state
@@ -216,9 +216,7 @@ class SPINFlow(EveAICrewAIFlow[SPINFlowState]):
     async def execute_rag(self):
         inputs = self.state.input.model_dump()
         try:
-            current_app.logger.debug("In execute_rag")
             crew_output = await self.rag_crew.kickoff_async(inputs=inputs)
-            current_app.logger.debug(f"Crew execution ended with output:\n{crew_output}")
             self.specialist_executor.log_tuning("RAG Crew Output", crew_output.model_dump())
             output_pydantic = crew_output.pydantic
             if not output_pydantic:
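A note on the `output_pydantic` guard kept above: `crew_output.pydantic` is only populated when the crew's final task declares a Pydantic output model, so the guard catches runs where no structured output was produced. A hedged sketch of the same check as a reusable helper (crew construction omitted):

async def run_crew_checked(crew, inputs):
    crew_output = await crew.kickoff_async(inputs=inputs)
    parsed = crew_output.pydantic  # None unless the final task set output_pydantic
    if not parsed:
        raise ValueError("Crew returned no structured output")
    return parsed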
@@ -277,11 +275,8 @@ class SPINFlow(EveAICrewAIFlow[SPINFlowState]):
         if self.state.spin:
             additional_questions = additional_questions + self.state.spin.questions
         inputs["additional_questions"] = additional_questions
-        current_app.logger.debug(f"Prepared Answers: \n{inputs['prepared_answers']}")
-        current_app.logger.debug(f"Additional Questions: \n{additional_questions}")
         try:
             crew_output = await self.rag_consolidation_crew.kickoff_async(inputs=inputs)
-            current_app.logger.debug(f"Consolidation output after crew execution:\n{crew_output}")
             self.specialist_executor.log_tuning("RAG Consolidation Crew Output", crew_output.model_dump())
             output_pydantic = crew_output.pydantic
             if not output_pydantic:
@@ -295,7 +290,6 @@ class SPINFlow(EveAICrewAIFlow[SPINFlowState]):
             raise e
 
     async def kickoff_async(self, inputs=None):
-        current_app.logger.debug(f"Async kickoff {self.name}")
         self.state.input = SPINSpecialistInput.model_validate(inputs)
         result = await super().kickoff_async(inputs)
         return self.state
@@ -1,4 +1,4 @@
-from typing import Dict, Any, Optional
+from typing import Dict, Any, Optional, List
 from pydantic import BaseModel, Field, model_validator
 from eveai_chat_workers.retrievers.retriever_typing import RetrieverArguments
 from common.extensions import cache_manager
@@ -103,10 +103,9 @@ class SpecialistResult(BaseModel):
 
     # Structural optional fields available for all specialists
     answer: Optional[str] = Field(None, description="Optional textual answer from the specialist")
-    detailed_question: Optional[str] = Field(None, description="Optional detailed question for the specialist")
     form_request: Optional[Dict[str, Any]] = Field(None, description="Optional form definition to request user input")
     phase: Optional[str] = Field(None, description="Phase of the specialist's workflow")
-    citations: Optional[Dict[str, Any]] = Field(None, description="Citations for the specialist's answer")
+    citations: Optional[List[Dict[str, Any]]] = Field(None, description="Citations for the specialist's answer")
 
     @model_validator(mode='after')
     def validate_required_results(self) -> 'SpecialistResult':
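The `citations` annotation change above is the companion to the new `citations` state fields: retrieval produces a list of citation dicts, and the old `Optional[Dict[str, Any]]` annotation would reject exactly that shape. A standalone illustration (the citation keys are assumed, not taken from the project):

from typing import Any, Dict, List, Optional
from pydantic import BaseModel, ValidationError

class OldResult(BaseModel):
    citations: Optional[Dict[str, Any]] = None

class NewResult(BaseModel):
    citations: Optional[List[Dict[str, Any]]] = None

try:
    OldResult(citations=[{"source": "doc-1", "page": 3}])
except ValidationError:
    print("Dict[str, Any] rejects a list of citations")

NewResult(citations=[{"source": "doc-1", "page": 3}])  # validates cleanly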
@@ -71,11 +71,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
     def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
         self.log_tuning("Traicie KO Criteria Interview Definition Specialist execution started", {})
 
-        current_app.logger.debug(f"Arguments: {arguments.model_dump()}")
-        current_app.logger.debug(f"Formatted Context: {formatted_context}")
-        current_app.logger.debug(f"Formatted History: {self._formatted_history}")
-        current_app.logger.debug(f"Cached Chat Session: {self._cached_session}")
-
         if not self._cached_session.interactions:
             specialist_phase = "initial"
         else:
@@ -104,13 +99,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
             raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id,
                                                 "Specialist is no Selection Specialist")
 
-        current_app.logger.debug(f"Specialist Competencies:\n"
-                                 f"{selection_specialist.configuration.get("competencies", [])}")
-
         ko_competencies = []
         for competency in selection_specialist.configuration.get("competencies", []):
             if competency["is_knockout"] is True and competency["assess"] is True:
-                current_app.logger.debug(f"Assessable Knockout competency: {competency}")
                 ko_competencies.append({"title": competency["title"], "description": competency["description"]})
 
         tone_of_voice = selection_specialist.configuration.get('tone_of_voice', 'Professional & Neutral')
@@ -118,7 +109,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
             (item for item in TONE_OF_VOICE if item["name"] == tone_of_voice),
             None  # fallback if not found
         )
-        current_app.logger.debug(f"Selected tone of voice: {selected_tone_of_voice}")
         tone_of_voice_context = f"{selected_tone_of_voice["description"]}"
 
         language_level = selection_specialist.configuration.get('language_level', 'Standard')
@@ -126,7 +116,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
             (item for item in LANGUAGE_LEVEL if item["name"] == language_level),
             None
         )
-        current_app.logger.debug(f"Selected language level: {selected_language_level}")
         language_level_context = (f"{selected_language_level['description']}, "
                                   f"corresponding to CEFR level {selected_language_level['cefr_level']}")
 
@@ -140,12 +129,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
         }
 
         flow_results = self.flow.kickoff(inputs=flow_inputs)
-        current_app.logger.debug(f"Flow results: {flow_results}")
-        current_app.logger.debug(f"Flow state: {self.flow.state}")
 
         new_type = "TRAICIE_KO_CRITERIA_QUESTIONS"
 
-        current_app.logger.debug(f"KO Criteria Questions:\n {self.flow.state.ko_questions}")
         # Check whether we have a KOQuestions object or a list of KOQuestion objects
         if hasattr(self.flow.state.ko_questions, 'to_json'):
             # It is a KOQuestions object
@@ -161,8 +147,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
             ko_questions_data = [q.model_dump() for q in self.flow.state.ko_questions]
             json_str = json.dumps(ko_questions_data, ensure_ascii=False, indent=2)
 
-        current_app.logger.debug(f"KO Criteria Questions json style:\n {json_str}")
-
         try:
             asset = db.session.query(EveAIAsset).filter(
                 EveAIAsset.type == new_type,
@@ -281,21 +265,17 @@ class KOFlow(EveAICrewAIFlow[KOFlowState]):
     async def execute_ko_def_definition(self):
         inputs = self.state.input.model_dump()
         try:
-            current_app.logger.debug("Run execute_ko_interview_definition")
             crew_output = await self.ko_def_crew.kickoff_async(inputs=inputs)
             # Unfortunately, crew_output will only contain the output of the latest task.
             # As we will only take into account the flow state, we need to ensure both competencies and criteria
             # are copied to the flow state.
             update = {}
             for task in self.ko_def_crew.tasks:
-                current_app.logger.debug(f"Task {task.name} output:\n{task.output}")
                 if task.name == "traicie_ko_criteria_interview_definition_task":
                     # update["competencies"] = task.output.pydantic.competencies
                     self.state.ko_questions = task.output.pydantic.ko_questions
             # crew_output.pydantic = crew_output.pydantic.model_copy(update=update)
             self.state.phase = "personal_contact_data"
-            current_app.logger.debug(f"State after execute_ko_def_definition: {self.state}")
-            current_app.logger.debug(f"State dump after execute_ko_def_definition: {self.state.model_dump()}")
             return crew_output
         except Exception as e:
             current_app.logger.error(f"CREW execute_ko_def Kickoff Error: {str(e)}")
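The comments above describe a real CrewAI constraint: the returned crew output only carries the final task's result, so intermediate results must be pulled from the individual tasks. A generalized sketch of that copy step (the task-name-to-field mapping is illustrative, not the project's full list):

def copy_task_outputs_to_state(crew, state, wanted):
    # wanted maps a task name to the state field its parsed output should land in,
    # e.g. {"traicie_ko_criteria_interview_definition_task": "ko_questions"}.
    for task in crew.tasks:
        field = wanted.get(task.name)
        if field and task.output and task.output.pydantic:
            setattr(state, field, getattr(task.output.pydantic, field))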
@@ -303,9 +283,6 @@ class KOFlow(EveAICrewAIFlow[KOFlowState]):
             raise e
 
     async def kickoff_async(self, inputs=None):
-        current_app.logger.debug(f"Async kickoff {self.name}")
-        current_app.logger.debug(f"Inputs: {inputs}")
         self.state.input = KODefInput.model_validate(inputs)
-        current_app.logger.debug(f"State: {self.state}")
         result = await super().kickoff_async(inputs)
         return self.state
@@ -143,8 +143,6 @@ class VacancyDefinitionFlow(EveAICrewAIFlow[VacancyDefFlowState]):
                     # update["criteria"] = task.output.pydantic.criteria
                     self.state.criteria = task.output.pydantic.criteria
             # crew_output.pydantic = crew_output.pydantic.model_copy(update=update)
-            current_app.logger.debug(f"State after execute_vac_def: {self.state}")
-            current_app.logger.debug(f"State dump after execute_vac_def: {self.state.model_dump()}")
             return crew_output
         except Exception as e:
             current_app.logger.error(f"CREW execute_vac_def Kickoff Error: {str(e)}")
@@ -152,7 +150,6 @@ class VacancyDefinitionFlow(EveAICrewAIFlow[VacancyDefFlowState]):
             raise e
 
     async def kickoff_async(self, inputs=None):
-        current_app.logger.debug(f"Async kickoff {self.name}")
         self.state.input = VacancyDefinitionSpecialistInput.model_validate(inputs)
         result = await super().kickoff_async(inputs)
         return self.state
@@ -168,20 +168,16 @@ class RoleDefinitionFlow(EveAICrewAIFlow[RoleDefFlowState]):
     async def execute_role_definition (self):
         inputs = self.state.input.model_dump()
         try:
-            current_app.logger.debug("In execute_role_definition")
             crew_output = await self.role_definition_crew.kickoff_async(inputs=inputs)
             # Unfortunately, crew_output will only contain the output of the latest task.
             # As we will only take into account the flow state, we need to ensure both competencies and criteria
             # are copied to the flow state.
             update = {}
             for task in self.role_definition_crew.tasks:
-                current_app.logger.debug(f"Task {task.name} output:\n{task.output}")
                 if task.name == "traicie_get_competencies_task":
                     # update["competencies"] = task.output.pydantic.competencies
                     self.state.competencies = task.output.pydantic.competencies
             # crew_output.pydantic = crew_output.pydantic.model_copy(update=update)
-            current_app.logger.debug(f"State after execute_role_definition: {self.state}")
-            current_app.logger.debug(f"State dump after execute_role_definition: {self.state.model_dump()}")
             return crew_output
         except Exception as e:
             current_app.logger.error(f"CREW execute_role_definition Kickoff Error: {str(e)}")
@@ -189,9 +185,6 @@ class RoleDefinitionFlow(EveAICrewAIFlow[RoleDefFlowState]):
             raise e
 
     async def kickoff_async(self, inputs=None):
-        current_app.logger.debug(f"Async kickoff {self.name}")
-        current_app.logger.debug(f"Inputs: {inputs}")
         self.state.input = RoleDefinitionSpecialistInput.model_validate(inputs)
-        current_app.logger.debug(f"State: {self.state}")
         result = await super().kickoff_async(inputs)
         return self.state
@@ -174,20 +174,16 @@ class RoleDefinitionFlow(EveAICrewAIFlow[RoleDefFlowState]):
     async def execute_role_definition (self):
         inputs = self.state.input.model_dump()
         try:
-            current_app.logger.debug("In execute_role_definition")
             crew_output = await self.role_definition_crew.kickoff_async(inputs=inputs)
             # Unfortunately, crew_output will only contain the output of the latest task.
             # As we will only take into account the flow state, we need to ensure both competencies and criteria
             # are copied to the flow state.
             update = {}
             for task in self.role_definition_crew.tasks:
-                current_app.logger.debug(f"Task {task.name} output:\n{task.output}")
                 if task.name == "traicie_get_competencies_task":
                     # update["competencies"] = task.output.pydantic.competencies
                     self.state.competencies = task.output.pydantic.competencies
             # crew_output.pydantic = crew_output.pydantic.model_copy(update=update)
-            current_app.logger.debug(f"State after execute_role_definition: {self.state}")
-            current_app.logger.debug(f"State dump after execute_role_definition: {self.state.model_dump()}")
             return crew_output
         except Exception as e:
             current_app.logger.error(f"CREW execute_role_definition Kickoff Error: {str(e)}")
@@ -195,9 +191,6 @@ class RoleDefinitionFlow(EveAICrewAIFlow[RoleDefFlowState]):
             raise e
 
     async def kickoff_async(self, inputs=None):
-        current_app.logger.debug(f"Async kickoff {self.name}")
-        current_app.logger.debug(f"Inputs: {inputs}")
         self.state.input = RoleDefinitionSpecialistInput.model_validate(inputs)
-        current_app.logger.debug(f"State: {self.state}")
         result = await super().kickoff_async(inputs)
         return self.state
@@ -61,6 +61,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
 
         # Load the Tenant & set language
         self.tenant = Tenant.query.get_or_404(tenant_id)
+        self.specialist_phase = "initial"
 
     @property
     def type(self) -> str:
@@ -106,19 +107,13 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
     def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
         self.log_tuning("Traicie Selection Specialist execution started", {})
 
-        current_app.logger.debug(f"Arguments: {arguments.model_dump()}")
-        current_app.logger.debug(f"Formatted Context: {formatted_context}")
-        current_app.logger.debug(f"Formatted History: {self._formatted_history}")
-        current_app.logger.debug(f"Cached Chat Session: {self._cached_session}")
-
         if not self._cached_session.interactions:
-            specialist_phase = "initial"
+            self.specialist_phase = "initial"
         else:
-            specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')
+            self.specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')
 
         results = None
-        current_app.logger.debug(f"Specialist phase: {specialist_phase}")
-        match specialist_phase:
+        match self.specialist_phase:
             case "initial":
                 results = self.execute_initial_state(arguments, formatted_context, citations)
             case "start_selection_procedure":
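The phase handling above is the session resumption mechanism: each interaction persists the phase it ended in under `specialist_results['phase']`, and the next `execute()` call resumes from it (promoting it to `self.specialist_phase` makes the current phase visible to the helper methods). A simplified sketch of the dispatch, with stand-in session objects and step functions:

def resolve_phase(session):
    # The phase stored by the previous interaction, or "initial" on first contact.
    if not session.interactions:
        return "initial"
    return session.interactions[-1].specialist_results.get("phase", "initial")

def dispatch(session):
    match resolve_phase(session):
        case "initial":
            return "run initial state"
        case "start_selection_procedure":
            return "run selection procedure"
        case other:
            return f"resume phase {other}"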
@@ -149,16 +144,21 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
         interaction_mode = arguments.interaction_mode
         if not interaction_mode:
             interaction_mode = "selection"
-        current_app.logger.debug(f"Interaction mode: {interaction_mode}")
 
         welcome_message = self.specialist.configuration.get("welcome_message", "Welcome to our selection process.")
         welcome_message = TranslationServices.translate(self.tenant_id, welcome_message, arguments.language)
 
         if interaction_mode == "selection":
             return self.execute_start_selection_procedure_state(arguments, formatted_context, citations,
                                                                 welcome_message)
-        else:  # We are in orientation mode, so we perform standard rag
-            return self.execute_rag_state(arguments, formatted_context, citations, welcome_message)
+        # We are in orientation mode, so we give a standard message, and move to rag state
+        start_selection_question = TranslationServices.translate(self.tenant_id, START_SELECTION_QUESTION,
+                                                                 arguments.language)
+        self.flow.state.answer = f"{welcome_message}\n\n{start_selection_question}"
+        self.flow.state.phase = "rag"
+
+        results = SelectionResult.create_for_type(self.type, self.type_version)
+
+        return results
 
     def execute_start_selection_procedure_state(self, arguments: SpecialistArguments, formatted_context, citations,
                                                 start_message=None) -> SpecialistResult:
@@ -172,7 +172,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
         ko_questions = self._get_ko_questions()
         fields = {}
         for ko_question in ko_questions.ko_questions:
-            current_app.logger.debug(f"KO Question: {ko_question}")
             fields[ko_question.title] = {
                 "name": ko_question.title,
                 "description": ko_question.title,
@@ -213,11 +212,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
         if not arguments.form_values:
             raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id,
                                                 "No form values returned")
-        current_app.logger.debug(f"Form values: {arguments.form_values}")
 
         # Load the previous KO Questions
         previous_ko_questions = self._get_ko_questions().ko_questions
-        current_app.logger.debug(f"Previous KO Questions: {previous_ko_questions}")
 
         # Evaluate KO Criteria
         evaluation = "positive"
@@ -355,39 +352,37 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
         results = SelectionResult.create_for_type(self.type, self.type_version,)
         return results
 
-    def execute_rag_state(self, arguments: SpecialistArguments, formatted_context, citations, welcome_message=None) \
+    def execute_rag_state(self, arguments: SpecialistArguments, formatted_context, citations) \
             -> SpecialistResult:
         self.log_tuning("Traicie Selection Specialist rag_state started", {})
 
         start_selection_question = TranslationServices.translate(self.tenant_id, START_SELECTION_QUESTION,
                                                                  arguments.language)
-        if welcome_message:
-            answer = f"{welcome_message}\n\n{start_selection_question}"
-        else:
-            answer = ""
 
-        rag_results = None
-        if arguments.question:
-            if HumanAnswerServices.check_additional_information(self.tenant_id,
-                                                                START_SELECTION_QUESTION,
-                                                                arguments.question,
-                                                                arguments.language):
-                rag_results = self.execute_rag(arguments, formatted_context, citations)
-                self.flow.state.rag_output = rag_results.rag_output
-                answer = f"{answer}\n{rag_results.answer}"
-
-            if HumanAnswerServices.check_affirmative_answer(self.tenant_id,
-                                                            START_SELECTION_QUESTION,
-                                                            arguments.question,
-                                                            arguments.language):
-                return self.execute_start_selection_procedure_state(arguments, formatted_context, citations, answer)
-
-        self.flow.state.answer = answer
-        self.flow.state.phase = "rag"
-        self.flow.state.form_request = None
+        rag_output = None
+        if HumanAnswerServices.check_additional_information(self.tenant_id,
+                                                            START_SELECTION_QUESTION,
+                                                            arguments.question,
+                                                            arguments.language):
+            rag_output = self.execute_rag(arguments, formatted_context, citations)
+            self.flow.state.rag_output = rag_output
+            answer = rag_output.answer
+        else:
+            answer = ""
+
+        if HumanAnswerServices.check_affirmative_answer(self.tenant_id,
+                                                        START_SELECTION_QUESTION,
+                                                        arguments.question,
+                                                        arguments.language):
+            return self.execute_start_selection_procedure_state(arguments, formatted_context, citations, answer)
+        else:
+            self.flow.state.answer = f"{answer}\n\n{start_selection_question}"
+            self.flow.state.phase = "rag"
+            self.flow.state.form_request = None
 
         results = SelectionResult.create_for_type(self.type, self.type_version,)
         return results
 
     def execute_rag(self, arguments: SpecialistArguments, formatted_context, citations) -> RAGOutput:
         self.log_tuning("RAG Specialist execution started", {})
@@ -395,6 +390,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
         insufficient_info_message = TranslationServices.translate(self.tenant_id,
                                                                   INSUFFICIENT_INFORMATION_MESSAGE,
                                                                   arguments.language)
 
+        formatted_context, citations = self._retrieve_context(arguments)
+        self.flow.state.citations = citations
+
         if formatted_context:
             flow_inputs = {
                 "language": arguments.language,
@@ -403,9 +401,11 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
                 "history": self.formatted_history,
                 "name": self.specialist.configuration.get('name', ''),
             }
-            rag_output = self.flow.kickoff(inputs=flow_inputs)
-            if rag_output.rag_output.insufficient_info:
-                rag_output.rag_output.answer = insufficient_info_message
+            flow_results = self.flow.kickoff(inputs=flow_inputs)
+            if flow_results.rag_output.insufficient_info:
+                flow_results.rag_output.answer = insufficient_info_message
+
+            rag_output = flow_results.rag_output
         else:
             rag_output = RAGOutput(answer=insufficient_info_message, insufficient_info=True)
 
@@ -418,8 +418,11 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
                                                         START_SELECTION_QUESTION,
                                                         arguments.question,
                                                         arguments.language):
-            results = self.execute_rag(arguments, formatted_context, citations)
-            return results
+            rag_output = self.execute_rag(arguments, formatted_context, citations)
+
+            self.flow.state.rag_output = rag_output
+
+            return rag_output
         else:
             return None
 
@@ -439,7 +442,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
         ko_questions_data = minio_client.download_asset_file(self.tenant_id, ko_questions_asset.bucket_name,
                                                              ko_questions_asset.object_name)
         ko_questions = KOQuestions.from_json(ko_questions_data)
-        current_app.logger.debug(f"KO Questions: {ko_questions}")
 
         return ko_questions
 
@@ -470,8 +472,8 @@ class SelectionInput(BaseModel):
     history: Optional[str] = Field(None, alias="history")
     name: Optional[str] = Field(None, alias="name")
     # Selection elements
-    region: str = Field(..., alias="region")
-    working_schedule: Optional[str] = Field(..., alias="working_schedule")
+    region: Optional[str] = Field(None, alias="region")
+    working_schedule: Optional[str] = Field(None, alias="working_schedule")
     start_date: Optional[date] = Field(None, alias="vacancy_text")
     interaction_mode: Optional[str] = Field(None, alias="interaction_mode")
     tone_of_voice: Optional[str] = Field(None, alias="tone_of_voice")
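The `SelectionInput` change is about required-versus-optional semantics in Pydantic: `Field(...)` (Ellipsis) marks a field required even when the annotation is `Optional[...]`, so the old `working_schedule` declaration was nullable yet still mandatory. Switching the default to `None` makes both fields genuinely omittable:

from typing import Optional
from pydantic import BaseModel, Field, ValidationError

class Strict(BaseModel):
    region: Optional[str] = Field(..., alias="region")   # nullable but required

class Relaxed(BaseModel):
    region: Optional[str] = Field(None, alias="region")  # truly optional

Relaxed()                        # ok: region defaults to None
try:
    Strict()                     # still raises: the field must be supplied
except ValidationError:
    print("Field(...) keeps the field required")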
@@ -489,6 +491,7 @@ class SelectionFlowState(EveAIFlowState):
     ko_criteria_answers: Optional[Dict[str, str]] = None
     personal_contact_data: Optional[PersonalContactData] = None
     contact_time: Optional[str] = None
+    citations: Optional[List[Dict[str, Any]]] = None
 
 
 class SelectionResult(SpecialistResult):
@@ -530,7 +533,6 @@ class SelectionFlow(EveAICrewAIFlow[SelectionFlowState]):
             raise e
 
     async def kickoff_async(self, inputs=None):
-        current_app.logger.debug(f"Async kickoff {self.name}")
         self.state.input = SelectionInput.model_validate(inputs)
         result = await super().kickoff_async(inputs)
         return self.state
@@ -586,7 +586,6 @@ def combine_chunks_for_markdown(potential_chunks, min_chars, max_chars, processo
         # Force new chunk if pattern matches
         if chunking_patterns and matches_chunking_pattern(chunk, chunking_patterns):
             if current_chunk and current_length >= min_chars:
-                current_app.logger.debug(f"Chunk Length of chunk to embed: {len(current_chunk)} ")
                 actual_chunks.append(current_chunk)
                 current_chunk = chunk
                 current_length = chunk_length
@@ -594,7 +593,6 @@ def combine_chunks_for_markdown(potential_chunks, min_chars, max_chars, processo
 
         if current_length + chunk_length > max_chars:
             if current_length >= min_chars:
-                current_app.logger.debug(f"Chunk Length of chunk to embed: {len(current_chunk)} ")
                 actual_chunks.append(current_chunk)
                 current_chunk = chunk
                 current_length = chunk_length
@@ -608,7 +606,6 @@ def combine_chunks_for_markdown(potential_chunks, min_chars, max_chars, processo
 
     # Handle the last chunk
     if current_chunk and current_length >= 0:
-        current_app.logger.debug(f"Chunk Length of chunk to embed: {len(current_chunk)} ")
         actual_chunks.append(current_chunk)
 
     return actual_chunks
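For orientation, the hunks above all sit inside one accumulation loop: pieces are appended to a growing buffer, the buffer is flushed once adding the next piece would exceed max_chars (provided it already holds at least min_chars), and the remainder is flushed at the end. A simplified sketch of that strategy, without the pattern-based forced breaks:

def combine_chunks(potential_chunks, min_chars, max_chars):
    actual_chunks, current = [], ""
    for chunk in potential_chunks:
        if current and len(current) + len(chunk) > max_chars and len(current) >= min_chars:
            actual_chunks.append(current)   # flush the full buffer
            current = chunk
        else:
            current += chunk                # keep accumulating
    if current:
        actual_chunks.append(current)       # flush the remainder
    return actual_chunks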
@@ -630,7 +627,6 @@ def get_processor_for_document(catalog_id: int, file_type: str, sub_file_type: s
         ValueError: If no matching processor is found
     """
     try:
-        current_app.logger.debug(f"Getting processor for catalog {catalog_id}, file type {file_type}, file sub_type {sub_file_type} ")
         # Start with base query for catalog
         query = Processor.query.filter_by(catalog_id=catalog_id).filter_by(active=True)
 
@@ -647,7 +643,6 @@ def get_processor_for_document(catalog_id: int, file_type: str, sub_file_type: s
         if not available_processors:
             raise EveAINoProcessorFound(catalog_id, file_type, sub_file_type)
         available_processor_types = [processor.type for processor in available_processors]
-        current_app.logger.debug(f"Available processors for catalog {catalog_id}: {available_processor_types}")
 
         # Find processor type that handles this file type
         matching_processor_type = None
@@ -657,17 +652,13 @@ def get_processor_for_document(catalog_id: int, file_type: str, sub_file_type: s
             supported_types = config['file_types']
             if isinstance(supported_types, str):
                 supported_types = [t.strip() for t in supported_types.split(',')]
-            current_app.logger.debug(f"Supported types for processor type {proc_type}: {supported_types}")
 
             if file_type in supported_types:
                 matching_processor_type = proc_type
                 break
 
-        current_app.logger.debug(f"Processor type found for catalog {catalog_id}, file type {file_type}: {matching_processor_type}")
         if not matching_processor_type:
             raise EveAINoProcessorFound(catalog_id, file_type, sub_file_type)
-        else:
-            current_app.logger.debug(f"Processor type found for file type: {file_type}: {matching_processor_type}")
 
         processor = None
         for proc in available_processors:
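One detail worth keeping in mind from the lookup above: a processor's `file_types` configuration may be either a list or a comma-separated string, and the `isinstance` branch normalizes the string form before the membership test:

supported_types = "pdf, docx,md"          # string form, as stored in config
if isinstance(supported_types, str):
    supported_types = [t.strip() for t in supported_types.split(",")]
assert supported_types == ["pdf", "docx", "md"]
assert "docx" in supported_types          # membership test now works uniformly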
@@ -678,7 +669,6 @@ def get_processor_for_document(catalog_id: int, file_type: str, sub_file_type: s
         if not processor:
             raise EveAINoProcessorFound(catalog_id, file_type, sub_file_type)
 
-        current_app.logger.debug(f"Processor found for catalog {catalog_id}, file type {file_type}: {processor}")
         return processor
 
     except Exception as e:
@@ -82,7 +82,7 @@ typing_extensions~=4.12.2
 babel~=2.16.0
 dogpile.cache~=1.3.3
 python-docx~=1.1.2
-crewai~=0.121.0
+crewai~=0.140.0
 sseclient~=0.0.27
 termcolor~=2.5.0
 mistral-common~=1.5.5
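The `~=0.140.0` specifier is a PEP 440 compatible-release pin: it allows 0.140.x patch releases but not 0.141.0, which matters for CrewAI's fast-moving API. A quick post-upgrade sanity check, assuming the package exposes `__version__`:

import crewai

print(crewai.__version__)                        # expect a 0.140.x release
assert crewai.__version__.startswith("0.140.")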