diff --git a/common/eveai_model/tracked_mistral_embeddings.py b/common/eveai_model/tracked_mistral_embeddings.py index 0a44fbf..2ccc461 100644 --- a/common/eveai_model/tracked_mistral_embeddings.py +++ b/common/eveai_model/tracked_mistral_embeddings.py @@ -44,7 +44,6 @@ class TrackedMistralAIEmbeddings(EveAIEmbeddings): for i in range(0, len(texts), self.batch_size): batch = texts[i:i + self.batch_size] batch_num = i // self.batch_size + 1 - current_app.logger.debug(f"Processing embedding batch {batch_num}, size: {len(batch)}") start_time = time.time() try: @@ -70,9 +69,6 @@ class TrackedMistralAIEmbeddings(EveAIEmbeddings): } current_event.log_llm_metrics(metrics) - current_app.logger.debug(f"Batch {batch_num} processed: {len(batch)} texts, " - f"{result.usage.total_tokens} tokens, {batch_time:.2f}s") - # If processing multiple batches, add a small delay to avoid rate limits if len(texts) > self.batch_size and i + self.batch_size < len(texts): time.sleep(0.25) # 250ms pause between batches @@ -82,7 +78,6 @@ class TrackedMistralAIEmbeddings(EveAIEmbeddings): # If a batch fails, try to process each text individually for j, text in enumerate(batch): try: - current_app.logger.debug(f"Attempting individual embedding for item {i + j}") single_start_time = time.time() single_result = self.client.embeddings.create( model=self.model, diff --git a/common/services/utils/human_answer_services.py b/common/services/utils/human_answer_services.py index ab1d0f2..c0f639f 100644 --- a/common/services/utils/human_answer_services.py +++ b/common/services/utils/human_answer_services.py @@ -18,8 +18,10 @@ class HumanAnswerServices: @staticmethod def check_additional_information(tenant_id: int, question: str, answer: str, language_iso: str) -> bool: - return HumanAnswerServices._check_answer(tenant_id, question, answer, language_iso, - "check_additional_information", "Check Additional Information") + result = HumanAnswerServices._check_answer(tenant_id, question, answer, language_iso, + "check_additional_information", "Check Additional Information") + + return result @staticmethod def get_answer_to_question(tenant_id: int, question: str, answer: str, language_iso: str) -> str: @@ -66,7 +68,6 @@ class HumanAnswerServices: chain = (setup | check_answer_prompt | structured_llm ) raw_answer = chain.invoke(prompt_params) - current_app.logger.debug(f"Raw answer: {raw_answer}") return raw_answer.answer @@ -89,7 +90,6 @@ class HumanAnswerServices: chain = (setup | check_answer_prompt | structured_llm) raw_answer = chain.invoke(prompt_params) - current_app.logger.debug(f"Raw answer: {raw_answer}") return raw_answer.answer diff --git a/common/utils/cache/translation_cache.py b/common/utils/cache/translation_cache.py index e9d1d5f..6abc24a 100644 --- a/common/utils/cache/translation_cache.py +++ b/common/utils/cache/translation_cache.py @@ -68,7 +68,6 @@ class TranslationCacheHandler(CacheHandler[TranslationCache]): setattr(translation, column.name, value) - current_app.logger.debug(f"Translation Cache Retrieved: {translation}") metrics = { 'total_tokens': translation.prompt_tokens + translation.completion_tokens, 'prompt_tokens': translation.prompt_tokens, @@ -109,7 +108,6 @@ class TranslationCacheHandler(CacheHandler[TranslationCache]): """ if not context: context = 'No context provided.' 
- current_app.logger.debug(f"Getting translation for text: {text[:10]}..., target_lang: {target_lang}, source_lang: {source_lang}, context: {context[:10]}...") def creator_func(hash_key: str) -> Optional[TranslationCache]: # Check if translation already exists in database @@ -125,8 +123,6 @@ class TranslationCacheHandler(CacheHandler[TranslationCache]): 'time_elapsed': 0, 'interaction_type': 'LLM' } - current_app.logger.debug(f"Found existing translation in DB: {existing_translation.cache_key}") - current_app.logger.debug(f"Metrics: {metrics}") current_event.log_llm_metrics(metrics) db.session.commit() return existing_translation @@ -165,7 +161,6 @@ class TranslationCacheHandler(CacheHandler[TranslationCache]): # Generate the hash key using your existing method hash_key = self._generate_cache_key(text, target_lang, source_lang, context) - current_app.logger.debug(f"Generated hash key: {hash_key}") # Pass the hash_key to the get method return self.get(creator_func, hash_key=hash_key) @@ -189,7 +184,6 @@ class TranslationCacheHandler(CacheHandler[TranslationCache]): def translate_text(self, text_to_translate: str, target_lang: str, source_lang: str = None, context: str = None) \ -> tuple[str, dict[str, int | float]]: target_language = current_app.config['SUPPORTED_LANGUAGE_ISO639_1_LOOKUP'][target_lang] - current_app.logger.debug(f"Target language: {target_language}") prompt_params = { "text_to_translate": text_to_translate, "target_language": target_language, diff --git a/common/utils/chat_utils.py b/common/utils/chat_utils.py index b8a0cd2..f7e334c 100644 --- a/common/utils/chat_utils.py +++ b/common/utils/chat_utils.py @@ -44,13 +44,11 @@ def get_default_chat_customisation(tenant_customisation=None): if isinstance(tenant_customisation, str): try: tenant_customisation = json.loads(tenant_customisation) - current_app.logger.debug(f"Converted JSON string to dict: {tenant_customisation}") except json.JSONDecodeError as e: current_app.logger.error(f"Error parsing JSON customisation: {e}") return default_customisation # Update with tenant customization - current_app.logger.debug(f"Tenant customisation - in default creation: {tenant_customisation}") if tenant_customisation: for key, value in tenant_customisation.items(): if key in customisation: diff --git a/common/utils/mail_utils.py b/common/utils/mail_utils.py index ff40848..292ee59 100644 --- a/common/utils/mail_utils.py +++ b/common/utils/mail_utils.py @@ -6,22 +6,17 @@ from flask import current_app def send_email(to_email, to_name, subject, html): - current_app.logger.debug(f"Sending email to {to_email} with subject {subject}") access_key = current_app.config['SW_EMAIL_ACCESS_KEY'] secret_key = current_app.config['SW_EMAIL_SECRET_KEY'] default_project_id = current_app.config['SW_PROJECT'] default_region = "fr-par" - current_app.logger.debug(f"Access Key: {access_key}\nSecret Key: {secret_key}\n" - f"Default Project ID: {default_project_id}\nDefault Region: {default_region}") client = Client( access_key=access_key, secret_key=secret_key, default_project_id=default_project_id, default_region=default_region ) - current_app.logger.debug(f"Scaleway Client Initialized") tem = TemV1Alpha1API(client) - current_app.logger.debug(f"Tem Initialized") from_ = CreateEmailRequestAddress(email=current_app.config['SW_EMAIL_SENDER'], name=current_app.config['SW_EMAIL_NAME']) to_ = CreateEmailRequestAddress(email=to_email, name=to_name) @@ -34,7 +29,6 @@ def send_email(to_email, to_name, subject, html): html=html, project_id=default_project_id, ) - 
current_app.logger.debug(f"Email sent to {to_email}") def html_to_text(html_content): diff --git a/common/utils/template_filters.py b/common/utils/template_filters.py index 07034ee..049179a 100644 --- a/common/utils/template_filters.py +++ b/common/utils/template_filters.py @@ -98,7 +98,6 @@ def get_pagination_html(pagination, endpoint, **kwargs): if page: is_active = 'active' if page == pagination.page else '' url = url_for(endpoint, page=page, **kwargs) - current_app.logger.debug(f"URL for page {page}: {url}") html.append(f'
<li class="page-item {is_active}"><a class="page-link" href="{url}">{page}</a></li>') else: html.append('<li class="page-item disabled"><span class="page-link">...</span></li>') diff --git a/config/agents/globals/RAG_AGENT/1.1.0.yaml b/config/agents/globals/RAG_AGENT/1.1.0.yaml index ea8fefd..68709d8 100644 --- a/config/agents/globals/RAG_AGENT/1.1.0.yaml +++ b/config/agents/globals/RAG_AGENT/1.1.0.yaml @@ -4,7 +4,8 @@ role: > {tenant_name} Spokesperson. {custom_role} goal: > You get questions by a human correspondent, and give answers based on a given context, taking into account the history - of the current conversation. {custom_goal} + of the current conversation. + {custom_goal} backstory: > You are the primary contact for {tenant_name}. You are known by {name}, and can be addressed by this name, or you. You are a very good communicator, and adapt to the style used by the human asking for information (e.g. formal or informal). @@ -13,7 +14,7 @@ backstory: > language the context provided to you is in. You are participating in a conversation, not writing e.g. an email. Do not include a salutation or closing greeting in your answer. {custom_backstory} -full_model_name: "mistral.mistral-small-latest" +full_model_name: "mistral.mistral-medium-latest" temperature: 0.3 metadata: author: "Josako" diff --git a/config/prompts/globals/check_additional_information/1.0.0.yaml b/config/prompts/globals/check_additional_information/1.0.0.yaml index 9976165..357d083 100644 --- a/config/prompts/globals/check_additional_information/1.0.0.yaml +++ b/config/prompts/globals/check_additional_information/1.0.0.yaml @@ -1,13 +1,17 @@ version: "1.0.0" content: > - Check if additional information or questions are available in the following answer (answer in between triple - backquotes): + Check if the provided text (in between triple $) contains elements other than answers to the + following question (in between triple €): - ```{answer}``` + €€€ + {question} + €€€ - in addition to answers to the following question (in between triple backquotes): - - ```{question}``` + Provided text: + + $$$ + {answer} + $$$ Answer with True or False, without additional information. llm_model: "mistral.mistral-medium-latest" diff --git a/config/prompts/globals/history/1.0.0.yaml b/config/prompts/globals/history/1.0.0.yaml index 3e77e53..3ff1309 100644 --- a/config/prompts/globals/history/1.0.0.yaml +++ b/config/prompts/globals/history/1.0.0.yaml @@ -4,7 +4,7 @@ content: | question is understandable without that history. The conversation is a consequence of questions and context provided by the HUMAN, and the AI (you) answering back, in chronological order. The most recent (i.e. last) elements are the most important when detailing the question. - You answer by stating the detailed question in {language}. + You return only the detailed question in {language}, without any additional information. History: ```{history}``` Question to be detailed: diff --git a/config/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1.4.0.yaml b/config/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1.4.0.yaml index 219f7ea..d66f4d0 100644 --- a/config/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1.4.0.yaml +++ b/config/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1.4.0.yaml @@ -93,7 +93,7 @@ arguments: name: "Interaction Mode" type: "enum" description: "The interaction mode the specialist will start working in."
- allowed_values: ["orientation", "seduction"] + allowed_values: ["orientation", "selection"] default: "orientation" required: true results: diff --git a/config/tasks/globals/RAG_TASK/1.1.0.yaml b/config/tasks/globals/RAG_TASK/1.1.0.yaml index ad3a714..a941103 100644 --- a/config/tasks/globals/RAG_TASK/1.1.0.yaml +++ b/config/tasks/globals/RAG_TASK/1.1.0.yaml @@ -8,14 +8,14 @@ task_description: > Use the following {language} in your communication, and cite the sources used at the end of the full conversation. If the question cannot be answered using the given context, answer "I have insufficient information to answer this question." - Context (in between triple backquotes): - ```{context}``` - History (in between triple backquotes): - ```{history}``` - Question (in between triple backquotes): - ```{question}``` + Context (in between triple $): + $$${context}$$$ + History (in between triple €): + €€€{history}€€€ + Question (in between triple £): + £££{question}£££ expected_output: > - + Your answer. metadata: author: "Josako" date_added: "2025-01-08" diff --git a/eveai_app/views/basic_views.py b/eveai_app/views/basic_views.py index eefba77..544e523 100644 --- a/eveai_app/views/basic_views.py +++ b/eveai_app/views/basic_views.py @@ -121,7 +121,6 @@ def view_content(content_type): content_type (str): Type content (eg. 'changelog', 'terms', 'privacy') """ try: - current_app.logger.debug(f"Showing content {content_type}") major_minor = request.args.get('version') patch = request.args.get('patch') @@ -163,5 +162,4 @@ def view_content(content_type): @roles_accepted('Super User', 'Partner Admin', 'Tenant Admin') def release_notes(): """Doorverwijzen naar de nieuwe content view voor changelog""" - current_app.logger.debug(f"Redirecting to content viewer") return redirect(prefixed_url_for('basic_bp.view_content', content_type='changelog')) diff --git a/eveai_app/views/document_forms.py b/eveai_app/views/document_forms.py index 04a74bd..9c11cab 100644 --- a/eveai_app/views/document_forms.py +++ b/eveai_app/views/document_forms.py @@ -137,14 +137,12 @@ class RetrieverForm(FlaskForm): super().__init__(*args, **kwargs) tenant_id = session.get('tenant').get('id') choices = TenantServices.get_available_types_for_tenant(tenant_id, "retrievers") - current_app.logger.debug(f"Potential choices: {choices}") # Dynamically populate the 'type' field using the constructor type_choices = [] for key, value in choices.items(): valid_catalog_types = value.get('valid_catalog_types', None) if valid_catalog_types: catalog_type = session.get('catalog').get('type') - current_app.logger.debug(f"Check {catalog_type} in {valid_catalog_types}") if catalog_type in valid_catalog_types: type_choices.append((key, value['name'])) else: # Retriever type is valid for all catalog types diff --git a/eveai_app/views/document_views.py b/eveai_app/views/document_views.py index b410135..de6dd26 100644 --- a/eveai_app/views/document_views.py +++ b/eveai_app/views/document_views.py @@ -668,7 +668,6 @@ def handle_document_version_selection(): return redirect(prefixed_url_for('document_bp.document_versions_list')) action = request.form['action'] - current_app.logger.debug(f'Action: {action}') match action: case 'edit_document_version': @@ -747,7 +746,6 @@ def document_versions_list(): @document_bp.route('/view_document_version_markdown/', methods=['GET']) @roles_accepted('Super User', 'Partner Admin', 'Tenant Admin') def view_document_version_markdown(document_version_id): - current_app.logger.debug(f'Viewing document version markdown 
{document_version_id}') # Retrieve document version document_version = DocumentVersion.query.get_or_404(document_version_id) @@ -759,7 +757,6 @@ def view_document_version_markdown(document_version_id): markdown_filename = f"{document_version.id}.md" markdown_object_name = minio_client.generate_object_name(document_version.doc_id, document_version.language, document_version.id, markdown_filename) - current_app.logger.debug(f'Markdown object name: {markdown_object_name}') # Download actual markdown file file_data = minio_client.download_document_file( tenant_id, @@ -769,7 +766,6 @@ def view_document_version_markdown(document_version_id): # Decodeer de binaire data naar UTF-8 tekst markdown_content = file_data.decode('utf-8') - current_app.logger.debug(f'Markdown content: {markdown_content}') # Render de template met de markdown inhoud return render_template( diff --git a/eveai_app/views/dynamic_form_base.py b/eveai_app/views/dynamic_form_base.py index fa6b5bf..a7525c5 100644 --- a/eveai_app/views/dynamic_form_base.py +++ b/eveai_app/views/dynamic_form_base.py @@ -66,8 +66,6 @@ class OrderedListField(TextAreaField): else: existing_render_kw = {} - current_app.logger.debug(f"incomming render_kw for ordered list field: {existing_render_kw}") - # Stel nieuwe render_kw samen new_render_kw = { 'data-list-type': list_type, @@ -91,8 +89,6 @@ class OrderedListField(TextAreaField): if key != 'class': # Klassen hebben we al verwerkt new_render_kw[key] = value - current_app.logger.debug(f"final render_kw for ordered list field: {new_render_kw}") - # Update kwargs met de nieuwe gecombineerde render_kw kwargs['render_kw'] = new_render_kw @@ -327,7 +323,6 @@ class DynamicFormBase(FlaskForm): for the collection_name and may also contain list_type definitions initial_data: Optional initial data for the fields """ - current_app.logger.debug(f"Adding dynamic fields for collection {collection_name} with config: {config}") if isinstance(initial_data, str): try: @@ -535,7 +530,7 @@ class DynamicFormBase(FlaskForm): list_types[list_type] = specialist_config[list_type] break except Exception as e: - current_app.logger.debug(f"Error checking specialist {specialist_type}: {e}") + current_app.logger.error(f"Error checking specialist {specialist_type}: {e}") continue except Exception as e: current_app.logger.error(f"Error retrieving specialist configurations: {e}") @@ -575,7 +570,6 @@ class DynamicFormBase(FlaskForm): # Parse JSON for special field types if field.type == 'BooleanField': data[original_field_name] = full_field_name in self.raw_formdata - current_app.logger.debug(f"Value for {original_field_name} is {data[original_field_name]}") elif isinstance(field, (TaggingFieldsField, TaggingFieldsFilterField, DynamicArgumentsField, OrderedListField)) and field.data: try: data[original_field_name] = json.loads(field.data) diff --git a/eveai_app/views/interaction_views.py b/eveai_app/views/interaction_views.py index 295806d..7f1a395 100644 --- a/eveai_app/views/interaction_views.py +++ b/eveai_app/views/interaction_views.py @@ -76,7 +76,6 @@ def handle_chat_session_selection(): cs_id = ast.literal_eval(chat_session_identification).get('value') action = request.form['action'] - current_app.logger.debug(f'Handle Chat Session Selection Action: {action}') match action: case 'view_chat_session': @@ -503,7 +502,6 @@ def execute_specialist(specialist_id): if form.validate_on_submit(): # We're only interested in gathering the dynamic arguments arguments = form.get_dynamic_data("arguments") - 
current_app.logger.debug(f"Executing specialist {specialist.id} with arguments: {arguments}") session_id = SpecialistServices.start_session() execution_task = SpecialistServices.execute_specialist( tenant_id=session.get('tenant').get('id'), @@ -512,7 +510,6 @@ def execute_specialist(specialist_id): session_id=session_id, user_timezone=session.get('tenant').get('timezone') ) - current_app.logger.debug(f"Execution task for specialist {specialist.id} created: {execution_task}") return redirect(prefixed_url_for('interaction_bp.session_interactions_by_session_id', session_id=session_id)) return render_template('interaction/execute_specialist.html', form=form) @@ -620,7 +617,6 @@ def specialist_magic_link(): # Define the make valid for this magic link specialist = Specialist.query.get(new_specialist_magic_link.specialist_id) make_id = specialist.configuration.get('make', None) - current_app.logger.debug(f"make_id defined in specialist: {make_id}") if make_id: new_specialist_magic_link.tenant_make_id = make_id elif session.get('tenant').get('default_tenant_make_id'): @@ -707,10 +703,6 @@ def edit_specialist_magic_link(specialist_magic_link_id): # Store the data URI in the form data form.qr_code_url.data = data_uri - - current_app.logger.debug(f"QR code generated successfully for {magic_link_code}") - current_app.logger.debug(f"QR code data URI starts with: {data_uri[:50]}...") - except Exception as e: current_app.logger.error(f"Failed to generate QR code: {str(e)}") form.qr_code_url.data = "Error generating QR code" @@ -794,7 +786,6 @@ def assets(): def handle_asset_selection(): action = request.form.get('action') asset_id = request.form.get('selected_row') - current_app.logger.debug(f"Action: {action}, Asset ID: {asset_id}") if action == 'edit_asset': return redirect(prefixed_url_for('interaction_bp.edit_asset', asset_id=asset_id)) diff --git a/eveai_app/views/list_views/assets_list_view.py b/eveai_app/views/list_views/assets_list_view.py index ae8a075..e408ca9 100644 --- a/eveai_app/views/list_views/assets_list_view.py +++ b/eveai_app/views/list_views/assets_list_view.py @@ -51,7 +51,6 @@ class AssetsListView(FilteredListView): else: return '' - current_app.logger.debug(f"Assets retrieved: {pagination.items}") rows = [ [ {'value': item.id, 'class': '', 'type': 'text'}, diff --git a/eveai_app/views/list_views/document_list_view.py b/eveai_app/views/list_views/document_list_view.py index 95f7910..bd11bf7 100644 --- a/eveai_app/views/list_views/document_list_view.py +++ b/eveai_app/views/list_views/document_list_view.py @@ -11,7 +11,6 @@ class DocumentListView(FilteredListView): def get_query(self): catalog_id = session.get('catalog_id') - current_app.logger.debug(f"Catalog ID: {catalog_id}") return Document.query.filter_by(catalog_id=catalog_id) def apply_filters(self, query): @@ -57,7 +56,6 @@ class DocumentListView(FilteredListView): else: return '' - current_app.logger.debug(f"Items retrieved: {pagination.items}") rows = [ [ {'value': item.id, 'class': '', 'type': 'text'}, diff --git a/eveai_app/views/list_views/full_document_list_view.py b/eveai_app/views/list_views/full_document_list_view.py index 5c81107..33d5ba8 100644 --- a/eveai_app/views/list_views/full_document_list_view.py +++ b/eveai_app/views/list_views/full_document_list_view.py @@ -19,7 +19,6 @@ class FullDocumentListView(FilteredListView): def get_query(self): catalog_id = session.get('catalog_id') - current_app.logger.debug(f"Catalog ID: {catalog_id}") # Fix: Selecteer alleen de id kolom in de subquery latest_version_subquery = 
( diff --git a/eveai_app/views/partner_views.py b/eveai_app/views/partner_views.py index 1b68f84..a1bb6f0 100644 --- a/eveai_app/views/partner_views.py +++ b/eveai_app/views/partner_views.py @@ -57,7 +57,6 @@ def edit_partner(partner_id): form.tenant.data = tenant.name if form.validate_on_submit(): - current_app.logger.debug(f"Form data for Partner: {form.data}") # Populate the user with form data form.populate_obj(partner) update_logging_information(partner, dt.now(tz.utc)) @@ -88,8 +87,6 @@ def partners(): Tenant.name.label('name') ).join(Tenant, Partner.tenant_id == Tenant.id).order_by(Partner.id)) - current_app.logger.debug(f'{format_query_results(query)}') - pagination = query.paginate(page=page, per_page=per_page) the_partners = pagination.items @@ -170,17 +167,10 @@ def edit_partner_service(partner_service_id): form.add_dynamic_fields("configuration", partner_service_config, partner_service.configuration) form.add_dynamic_fields("permissions", partner_service_config, partner_service.permissions) - if request.method == 'POST': - current_app.logger.debug(f"Form returned: {form.data}") - raw_form_data = request.form.to_dict() - current_app.logger.debug(f"Raw form data: {raw_form_data}") - if form.validate_on_submit(): form.populate_obj(partner_service) partner_service.configuration = form.get_dynamic_data('configuration') partner_service.permissions = form.get_dynamic_data('permissions') - current_app.logger.debug(f"Partner Service configuration: {partner_service.configuration}") - current_app.logger.debug(f"Partner Service permissions: {partner_service.permissions}") update_logging_information(partner_service, dt.now(tz.utc)) diff --git a/eveai_app/views/user_forms.py b/eveai_app/views/user_forms.py index 4eadc45..41bba46 100644 --- a/eveai_app/views/user_forms.py +++ b/eveai_app/views/user_forms.py @@ -172,11 +172,6 @@ def validate_make_name(form, field): # Check if tenant_make already exists in the database existing_make = TenantMake.query.filter_by(name=field.data).first() - if existing_make: - current_app.logger.debug(f'Existing make: {existing_make.id}') - current_app.logger.debug(f'Form has id: {hasattr(form, 'id')}') - if hasattr(form, 'id'): - current_app.logger.debug(f'Form has id: {form.id.data}') if existing_make: if not hasattr(form, 'id') or form.id.data != existing_make.id: raise ValidationError(f'A Make with name "{field.data}" already exists. 
Choose another name.') diff --git a/eveai_app/views/user_views.py b/eveai_app/views/user_views.py index 5450126..af7fbbe 100644 --- a/eveai_app/views/user_views.py +++ b/eveai_app/views/user_views.py @@ -147,12 +147,8 @@ def select_tenant(): # Start with a base query query = Tenant.query - current_app.logger.debug("We proberen het scherm op te bouwen") - current_app.logger.debug(f"Session: {session}") - # Apply different filters based on user role if current_user.has_roles('Partner Admin') and 'partner' in session: - current_app.logger.debug("We zitten in partner mode") # Get the partner's management service management_service = next((service for service in session['partner']['services'] if service.get('type') == 'MANAGEMENT_SERVICE'), None) @@ -175,7 +171,6 @@ def select_tenant(): # Filter query to only show allowed tenants query = query.filter(Tenant.id.in_(allowed_tenant_ids)) - current_app.logger.debug("We zitten na partner service selectie") # Apply form filters (for both Super User and Partner Admin) if filter_form.validate_on_submit(): if filter_form.types.data: @@ -722,9 +717,7 @@ def edit_tenant_make(tenant_make_id): form.populate_obj(tenant_make) tenant_make.chat_customisation_options = form.get_dynamic_data("configuration") # Verwerk allowed_languages als array - current_app.logger.debug(f"Allowed languages: {form.allowed_languages.data}") tenant_make.allowed_languages = form.allowed_languages.data if form.allowed_languages.data else None - current_app.logger.debug(f"Updated allowed languages: {tenant_make.allowed_languages}") # Update logging information update_logging_information(tenant_make, dt.now(tz.utc)) diff --git a/eveai_chat_workers/outputs/globals/q_a_output/q_a_output_v1_0.py b/eveai_chat_workers/outputs/globals/q_a_output/q_a_output_v1_0.py index 5f02e6b..8ae6a7a 100644 --- a/eveai_chat_workers/outputs/globals/q_a_output/q_a_output_v1_0.py +++ b/eveai_chat_workers/outputs/globals/q_a_output/q_a_output_v1_0.py @@ -4,4 +4,4 @@ from pydantic import BaseModel, Field class QAOutput(BaseModel): - answer: bool = Field(None, description="True or False") + answer: bool = Field(None, description="Your answer, True or False") diff --git a/eveai_chat_workers/outputs/globals/rag/rag_v1_0.py b/eveai_chat_workers/outputs/globals/rag/rag_v1_0.py index 23d22a9..494687d 100644 --- a/eveai_chat_workers/outputs/globals/rag/rag_v1_0.py +++ b/eveai_chat_workers/outputs/globals/rag/rag_v1_0.py @@ -4,6 +4,6 @@ from pydantic import BaseModel, Field class RAGOutput(BaseModel): - answer: str = Field(None, description="Answer to the questions asked") + answer: str = Field(None, description="Answer to the questions asked, in Markdown format.") insufficient_info: bool = Field(None, description="An indication if there's insufficient information to answer") diff --git a/eveai_chat_workers/retrievers/base_retriever.py b/eveai_chat_workers/retrievers/base_retriever.py index 9f2b2be..bd0b7c3 100644 --- a/eveai_chat_workers/retrievers/base_retriever.py +++ b/eveai_chat_workers/retrievers/base_retriever.py @@ -108,6 +108,5 @@ def get_retriever_class(retriever_type: str, type_version: str): module_path = f"eveai_chat_workers.retrievers.{partner}.{retriever_type}.{major_minor}" else: module_path = f"eveai_chat_workers.retrievers.globals.{retriever_type}.{major_minor}" - current_app.logger.debug(f"Importing retriever class from {module_path}") module = importlib.import_module(module_path) return module.RetrieverExecutor \ No newline at end of file diff --git 
a/eveai_chat_workers/retrievers/globals/STANDARD_RAG/1_0.py b/eveai_chat_workers/retrievers/globals/STANDARD_RAG/1_0.py index bd27c71..c9392a1 100644 --- a/eveai_chat_workers/retrievers/globals/STANDARD_RAG/1_0.py +++ b/eveai_chat_workers/retrievers/globals/STANDARD_RAG/1_0.py @@ -116,8 +116,8 @@ class RetrieverExecutor(BaseRetriever): )) self.log_tuning('retrieve', { "arguments": arguments.model_dump(), - "similarity_threshold": self.similarity_threshold, - "k": self.k, + "similarity_threshold": similarity_threshold, + "k": k, "query": compiled_query, "Raw Results": str(results), "Processed Results": [r.model_dump() for r in processed_results], diff --git a/eveai_chat_workers/specialists/base_specialist.py b/eveai_chat_workers/specialists/base_specialist.py index 31473b9..e6dd070 100644 --- a/eveai_chat_workers/specialists/base_specialist.py +++ b/eveai_chat_workers/specialists/base_specialist.py @@ -135,11 +135,9 @@ def get_specialist_class(specialist_type: str, type_version: str): major_minor = '_'.join(type_version.split('.')[:2]) specialist_config = cache_manager.specialists_config_cache.get_config(specialist_type, type_version) partner = specialist_config.get("partner", None) - current_app.logger.debug(f"Specialist partner for {specialist_type} {type_version} is {partner}") if partner: module_path = f"eveai_chat_workers.specialists.{partner}.{specialist_type}.{major_minor}" else: module_path = f"eveai_chat_workers.specialists.globals.{specialist_type}.{major_minor}" - current_app.logger.debug(f"Importing specialist class from {module_path}") module = importlib.import_module(module_path) return module.SpecialistExecutor diff --git a/eveai_chat_workers/specialists/crewai_base_classes.py b/eveai_chat_workers/specialists/crewai_base_classes.py index 974ad07..bc5d3dd 100644 --- a/eveai_chat_workers/specialists/crewai_base_classes.py +++ b/eveai_chat_workers/specialists/crewai_base_classes.py @@ -40,7 +40,6 @@ class EveAICrewAIAgent(Agent): Returns: Output of the agent """ - current_app.logger.debug(f"Task Execution {task.name} by {self.name}") # with current_event.create_span(f"Task Execution {task.name} by {self.name}"): self.specialist.log_tuning(f"EveAI Agent {self.name}, Task {task.name} Start", {}) self.specialist.update_progress("EveAI Agent Task Start", @@ -134,11 +133,17 @@ class EveAICrewAIFlow(Flow): return self.state +class Citation(BaseModel): + document_id: int + document_version_id: int + url: str + + class EveAIFlowState(BaseModel): """Base class for all EveAI flow states""" answer: Optional[str] = None - detailed_question: Optional[str] = None question: Optional[str] = None phase: Optional[str] = None form_request: Optional[Dict[str, Any]] = None - citations: Optional[Dict[str, Any]] = None + citations: Optional[List[Citation]] = None + diff --git a/eveai_chat_workers/specialists/crewai_base_specialist.py b/eveai_chat_workers/specialists/crewai_base_specialist.py index 7076655..3bb1087 100644 --- a/eveai_chat_workers/specialists/crewai_base_specialist.py +++ b/eveai_chat_workers/specialists/crewai_base_specialist.py @@ -78,14 +78,15 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor): return "\n\n".join([ "\n\n".join([ f"HUMAN:\n" - f"{interaction.specialist_results['detailed_question']}" - if interaction.specialist_results.get('detailed_question') else "", + f"{interaction.specialist_arguments['question']}" + if interaction.specialist_arguments.get('question') else "", f"{interaction.specialist_arguments.get('form_values')}" if 
interaction.specialist_arguments.get('form_values') else "", f"AI:\n{interaction.specialist_results['answer']}" if interaction.specialist_results.get('answer') else "" ]).strip() for interaction in self._cached_session.interactions + if interaction.specialist_arguments.get('question') != "Initialize" ]) def _add_task_agent(self, task_name: str, agent_name: str): @@ -120,10 +121,9 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor): self._state_result_relations[state_name] = result_name def _config_default_state_result_relations(self): - for default_attribute_name in ['answer', 'detailed_question', 'form_request', 'phase', 'citations']: + for default_attribute_name in ['answer', 'form_request', 'phase', 'citations']: self._add_state_result_relation(default_attribute_name) - @abstractmethod def _config_state_result_relations(self): """Configure the state-result relations by adding state-result combinations. Use _add_state_result_relation()""" @@ -150,6 +150,7 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor): agent_goal = agent_config.get('goal', '').replace('{custom_goal}', agent.goal or '') agent_goal = self._replace_system_variables(agent_goal) agent_backstory = agent_config.get('backstory', '').replace('{custom_backstory}', agent.backstory or '') + agent_backstory = self._replace_system_variables(agent_backstory) agent_full_model_name = agent_config.get('full_model_name', 'mistral.mistral-large-latest') agent_temperature = agent_config.get('temperature', 0.3) llm = get_crewai_llm(agent_full_model_name, agent_temperature) @@ -183,12 +184,9 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor): "verbose": task.tuning } task_name = task.type.lower() - current_app.logger.debug(f"Task {task_name} is getting processed") if task_name in self._task_pydantic_outputs: task_kwargs["output_pydantic"] = self._task_pydantic_outputs[task_name] - current_app.logger.debug(f"Task {task_name} has an output pydantic: {self._task_pydantic_outputs[task_name]}") if task_name in self._task_agents: - current_app.logger.debug(f"Task {task_name} has an agent: {self._task_agents[task_name]}") task_kwargs["agent"] = self._agents[self._task_agents[task_name]] # Instantiate the task with dynamic arguments @@ -236,46 +234,6 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor): The assets can be retrieved using their type name in lower case, e.g. 
rag_agent""" raise NotImplementedError - def _detail_question(self, language: str, question: str) -> str: - """Detail question based on conversation history""" - try: - with current_event.create_span("Specialist Detail Question"): - # Get LLM and template - template, llm = get_template("history", temperature=0.3) - language_template = create_language_template(template, language) - - # Create prompt - history_prompt = ChatPromptTemplate.from_template(language_template) - - # Create chain - chain = ( - history_prompt | - llm | - StrOutputParser() - ) - - # Execute chain - detailed_question = chain.invoke({ - "history": self.formatted_history, - "question": question - }) - - self.log_tuning("_detail_question", { - "cached_session_id": self._cached_session.session_id, - "cached_session.interactions": str(self._cached_session.interactions), - "original_question": question, - "history_used": self.formatted_history, - "detailed_question": detailed_question, - }) - - self.update_progress("Detail Question", {"name": self.type}) - - return detailed_question - - except Exception as e: - current_app.logger.error(f"Error detailing question: {e}") - return question # Fallback to original question - def _retrieve_context(self, arguments: SpecialistArguments) -> tuple[str, list[dict[str, Any]]]: with current_event.create_span("Specialist Retrieval"): self.log_tuning("Starting context retrieval", { @@ -283,12 +241,8 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor): "all arguments": arguments.model_dump(), }) - original_question = arguments.question - detailed_question = self._detail_question(arguments.language, original_question) - modified_arguments = arguments.model_copy(update={ - "query": detailed_question, - "original_query": original_question + "query": arguments.question }) @@ -361,11 +315,8 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor): update_data = {} state_dict = self.flow.state.model_dump() - current_app.logger.debug(f"Updating specialist results with state: {state_dict}") for state_name, result_name in self._state_result_relations.items(): - current_app.logger.debug(f"Try Updating {result_name} with {state_name}") if state_name in state_dict and state_dict[state_name] is not None: - current_app.logger.debug(f"Updating {result_name} with {state_name} = {state_dict[state_name]}") update_data[result_name] = state_dict[state_name] return specialist_results.model_copy(update=update_data) @@ -383,35 +334,22 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor): # Initialize the standard state values self.flow.state.answer = None - self.flow.state.detailed_question = None + self.flow.state.question = None self.flow.state.form_request = None self.flow.state.phase = None self.flow.state.citations = [] @abstractmethod - def execute(self, arguments: SpecialistArguments, formatted_context: str, citations: List[int]) -> SpecialistResult: + def execute(self, arguments: SpecialistArguments, + formatted_context: Optional[str], citations: Optional[list[dict[str, Any]]]) -> SpecialistResult: raise NotImplementedError def execute_specialist(self, arguments: SpecialistArguments) -> SpecialistResult: - current_app.logger.debug(f"Retrievers for this specialist: {self.retrievers}") if self.retrievers: - # Detail the incoming query - if self._cached_session.interactions: - question = arguments.question - language = arguments.language - detailed_question = self._detail_question(language, question) - else: - detailed_question = arguments.question - - modified_arguments = { - 
"question": detailed_question, - "original_question": arguments.question - } - detailed_arguments = arguments.model_copy(update=modified_arguments) - formatted_context, citations = self._retrieve_context(detailed_arguments) - result = self.execute(detailed_arguments, formatted_context, citations) + formatted_context = None + citations = None + result = self.execute(arguments, formatted_context, citations) modified_result = { - "detailed_question": detailed_question, "citations": citations, } intermediate_result = result.model_copy(update=modified_result) diff --git a/eveai_chat_workers/specialists/globals/RAG_SPECIALIST/1_0.py b/eveai_chat_workers/specialists/globals/RAG_SPECIALIST/1_0.py index 8f360b3..25e7e6b 100644 --- a/eveai_chat_workers/specialists/globals/RAG_SPECIALIST/1_0.py +++ b/eveai_chat_workers/specialists/globals/RAG_SPECIALIST/1_0.py @@ -69,18 +69,12 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult: self.log_tuning("RAG Specialist execution started", {}) - current_app.logger.debug(f"Arguments: {arguments.model_dump()}") - current_app.logger.debug(f"Formatted Context: {formatted_context}") - current_app.logger.debug(f"Formatted History: {self._formatted_history}") - current_app.logger.debug(f"Cached Chat Session: {self._cached_session}") - if not self._cached_session.interactions: specialist_phase = "initial" else: specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial') results = None - current_app.logger.debug(f"Specialist Phase: {specialist_phase}") match specialist_phase: case "initial": @@ -191,7 +185,6 @@ class RAGFlow(EveAICrewAIFlow[RAGFlowState]): raise e async def kickoff_async(self, inputs=None): - current_app.logger.debug(f"Async kickoff {self.name}") self.state.input = RAGSpecialistInput.model_validate(inputs) result = await super().kickoff_async(inputs) return self.state diff --git a/eveai_chat_workers/specialists/globals/RAG_SPECIALIST/1_1.py b/eveai_chat_workers/specialists/globals/RAG_SPECIALIST/1_1.py index 74199ad..cae07db 100644 --- a/eveai_chat_workers/specialists/globals/RAG_SPECIALIST/1_1.py +++ b/eveai_chat_workers/specialists/globals/RAG_SPECIALIST/1_1.py @@ -1,6 +1,6 @@ import json from os import wait -from typing import Optional, List +from typing import Optional, List, Dict, Any from crewai.flow.flow import start, listen, and_ from flask import current_app @@ -47,6 +47,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): def _config_state_result_relations(self): self._add_state_result_relation("rag_output") + self._add_state_result_relation("citations") def _instantiate_specialist(self): verbose = self.tuning @@ -69,18 +70,12 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult: self.log_tuning("RAG Specialist execution started", {}) - current_app.logger.debug(f"Arguments: {arguments.model_dump()}") - current_app.logger.debug(f"Formatted Context: {formatted_context}") - current_app.logger.debug(f"Formatted History: {self._formatted_history}") - current_app.logger.debug(f"Cached Chat Session: {self._cached_session}") - if not self._cached_session.interactions: specialist_phase = "initial" else: specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial') results = None - current_app.logger.debug(f"Specialist Phase: {specialist_phase}") match 
specialist_phase: case "initial": @@ -112,6 +107,8 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): INSUFFICIENT_INFORMATION_MESSAGE, arguments.language) + formatted_context, citations = self._retrieve_context(arguments) + if formatted_context: flow_inputs = { "language": arguments.language, @@ -128,16 +125,18 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): flow_results.rag_output.answer = insufficient_info_message rag_output = flow_results.rag_output - else: rag_output = RAGOutput(answer=insufficient_info_message, insufficient_info=True) self.flow.state.rag_output = rag_output + self.flow.state.citations = citations self.flow.state.answer = rag_output.answer self.flow.state.phase = "rag" results = RAGSpecialistResult.create_for_type(self.type, self.type_version) + return results + class RAGSpecialistInput(BaseModel): language: Optional[str] = Field(None, alias="language") @@ -156,6 +155,7 @@ class RAGFlowState(EveAIFlowState): """Flow state for RAG specialist that automatically updates from task outputs""" input: Optional[RAGSpecialistInput] = None rag_output: Optional[RAGOutput] = None + citations: Optional[List[Dict[str, Any]]] = None class RAGFlow(EveAICrewAIFlow[RAGFlowState]): @@ -190,8 +190,6 @@ class RAGFlow(EveAICrewAIFlow[RAGFlowState]): raise e async def kickoff_async(self, inputs=None): - current_app.logger.debug(f"Async kickoff {self.name}") - current_app.logger.debug(f"Inputs: {inputs}") self.state.input = RAGSpecialistInput.model_validate(inputs) result = await super().kickoff_async(inputs) return self.state diff --git a/eveai_chat_workers/specialists/globals/SPIN_SPECIALIST/1_0.py b/eveai_chat_workers/specialists/globals/SPIN_SPECIALIST/1_0.py index 5979805..b6d4651 100644 --- a/eveai_chat_workers/specialists/globals/SPIN_SPECIALIST/1_0.py +++ b/eveai_chat_workers/specialists/globals/SPIN_SPECIALIST/1_0.py @@ -216,9 +216,7 @@ class SPINFlow(EveAICrewAIFlow[SPINFlowState]): async def execute_rag(self): inputs = self.state.input.model_dump() try: - current_app.logger.debug("In execute_rag") crew_output = await self.rag_crew.kickoff_async(inputs=inputs) - current_app.logger.debug(f"Crew execution ended with output:\n{crew_output}") self.specialist_executor.log_tuning("RAG Crew Output", crew_output.model_dump()) output_pydantic = crew_output.pydantic if not output_pydantic: @@ -277,11 +275,8 @@ class SPINFlow(EveAICrewAIFlow[SPINFlowState]): if self.state.spin: additional_questions = additional_questions + self.state.spin.questions inputs["additional_questions"] = additional_questions - current_app.logger.debug(f"Prepared Answers: \n{inputs['prepared_answers']}") - current_app.logger.debug(f"Additional Questions: \n{additional_questions}") try: crew_output = await self.rag_consolidation_crew.kickoff_async(inputs=inputs) - current_app.logger.debug(f"Consolidation output after crew execution:\n{crew_output}") self.specialist_executor.log_tuning("RAG Consolidation Crew Output", crew_output.model_dump()) output_pydantic = crew_output.pydantic if not output_pydantic: @@ -295,7 +290,6 @@ class SPINFlow(EveAICrewAIFlow[SPINFlowState]): raise e async def kickoff_async(self, inputs=None): - current_app.logger.debug(f"Async kickoff {self.name}") self.state.input = SPINSpecialistInput.model_validate(inputs) result = await super().kickoff_async(inputs) return self.state diff --git a/eveai_chat_workers/specialists/specialist_typing.py b/eveai_chat_workers/specialists/specialist_typing.py index b337194..693d053 100644 --- 
a/eveai_chat_workers/specialists/specialist_typing.py +++ b/eveai_chat_workers/specialists/specialist_typing.py @@ -1,4 +1,4 @@ -from typing import Dict, Any, Optional +from typing import Dict, Any, Optional, List from pydantic import BaseModel, Field, model_validator from eveai_chat_workers.retrievers.retriever_typing import RetrieverArguments from common.extensions import cache_manager @@ -103,10 +103,9 @@ class SpecialistResult(BaseModel): # Structural optional fields available for all specialists answer: Optional[str] = Field(None, description="Optional textual answer from the specialist") - detailed_question: Optional[str] = Field(None, description="Optional detailed question for the specialist") form_request: Optional[Dict[str, Any]] = Field(None, description="Optional form definition to request user input") phase: Optional[str] = Field(None, description="Phase of the specialist's workflow") - citations: Optional[Dict[str, Any]] = Field(None, description="Citations for the specialist's answer") + citations: Optional[List[Dict[str, Any]]] = Field(None, description="Citations for the specialist's answer") @model_validator(mode='after') def validate_required_results(self) -> 'SpecialistResult': diff --git a/eveai_chat_workers/specialists/traicie/TRAICIE_KO_INTERVIEW_DEFINITION_SPECIALIST/1_0.py b/eveai_chat_workers/specialists/traicie/TRAICIE_KO_INTERVIEW_DEFINITION_SPECIALIST/1_0.py index e989be7..2cece35 100644 --- a/eveai_chat_workers/specialists/traicie/TRAICIE_KO_INTERVIEW_DEFINITION_SPECIALIST/1_0.py +++ b/eveai_chat_workers/specialists/traicie/TRAICIE_KO_INTERVIEW_DEFINITION_SPECIALIST/1_0.py @@ -71,11 +71,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult: self.log_tuning("Traicie KO Criteria Interview Definition Specialist execution started", {}) - current_app.logger.debug(f"Arguments: {arguments.model_dump()}") - current_app.logger.debug(f"Formatted Context: {formatted_context}") - current_app.logger.debug(f"Formatted History: {self._formatted_history}") - current_app.logger.debug(f"Cached Chat Session: {self._cached_session}") - if not self._cached_session.interactions: specialist_phase = "initial" else: @@ -104,13 +99,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id, "Specialist is no Selection Specialist") - current_app.logger.debug(f"Specialist Competencies:\n" - f"{selection_specialist.configuration.get("competencies", [])}") - ko_competencies = [] for competency in selection_specialist.configuration.get("competencies", []): if competency["is_knockout"] is True and competency["assess"] is True: - current_app.logger.debug(f"Assessable Knockout competency: {competency}") ko_competencies.append({"title": competency["title"], "description": competency["description"]}) tone_of_voice = selection_specialist.configuration.get('tone_of_voice', 'Professional & Neutral') @@ -118,7 +109,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): (item for item in TONE_OF_VOICE if item["name"] == tone_of_voice), None # fallback indien niet gevonden ) - current_app.logger.debug(f"Selected tone of voice: {selected_tone_of_voice}") tone_of_voice_context = f"{selected_tone_of_voice["description"]}" language_level = selection_specialist.configuration.get('language_level', 'Standard') @@ -126,7 +116,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): (item for item in 
LANGUAGE_LEVEL if item["name"] == language_level), None ) - current_app.logger.debug(f"Selected language level: {selected_language_level}") language_level_context = (f"{selected_language_level['description']}, " f"corresponding to CEFR level {selected_language_level['cefr_level']}") @@ -140,12 +129,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): } flow_results = self.flow.kickoff(inputs=flow_inputs) - current_app.logger.debug(f"Flow results: {flow_results}") - current_app.logger.debug(f"Flow state: {self.flow.state}") new_type = "TRAICIE_KO_CRITERIA_QUESTIONS" - current_app.logger.debug(f"KO Criteria Questions:\n {self.flow.state.ko_questions}") # Controleer of we een KOQuestions object hebben of een lijst van KOQuestion objecten if hasattr(self.flow.state.ko_questions, 'to_json'): # Het is een KOQuestions object @@ -161,8 +147,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): ko_questions_data = [q.model_dump() for q in self.flow.state.ko_questions] json_str = json.dumps(ko_questions_data, ensure_ascii=False, indent=2) - current_app.logger.debug(f"KO Criteria Questions json style:\n {json_str}") - try: asset = db.session.query(EveAIAsset).filter( EveAIAsset.type == new_type, @@ -281,21 +265,17 @@ class KOFlow(EveAICrewAIFlow[KOFlowState]): async def execute_ko_def_definition(self): inputs = self.state.input.model_dump() try: - current_app.logger.debug("Run execute_ko_interview_definition") crew_output = await self.ko_def_crew.kickoff_async(inputs=inputs) # Unfortunately, crew_output will only contain the output of the latest task. # As we will only take into account the flow state, we need to ensure both competencies and criteria # are copies to the flow state. update = {} for task in self.ko_def_crew.tasks: - current_app.logger.debug(f"Task {task.name} output:\n{task.output}") if task.name == "traicie_ko_criteria_interview_definition_task": # update["competencies"] = task.output.pydantic.competencies self.state.ko_questions = task.output.pydantic.ko_questions # crew_output.pydantic = crew_output.pydantic.model_copy(update=update) self.state.phase = "personal_contact_data" - current_app.logger.debug(f"State after execute_ko_def_definition: {self.state}") - current_app.logger.debug(f"State dump after execute_ko_def_definition: {self.state.model_dump()}") return crew_output except Exception as e: current_app.logger.error(f"CREW execute_ko_def Kickoff Error: {str(e)}") @@ -303,9 +283,6 @@ class KOFlow(EveAICrewAIFlow[KOFlowState]): raise e async def kickoff_async(self, inputs=None): - current_app.logger.debug(f"Async kickoff {self.name}") - current_app.logger.debug(f"Inputs: {inputs}") self.state.input = KODefInput.model_validate(inputs) - current_app.logger.debug(f"State: {self.state}") result = await super().kickoff_async(inputs) return self.state diff --git a/eveai_chat_workers/specialists/traicie/TRAICIE_ROLE_DEFINITION_SPECIALIST/1_0.py b/eveai_chat_workers/specialists/traicie/TRAICIE_ROLE_DEFINITION_SPECIALIST/1_0.py index cd368f3..1f8c203 100644 --- a/eveai_chat_workers/specialists/traicie/TRAICIE_ROLE_DEFINITION_SPECIALIST/1_0.py +++ b/eveai_chat_workers/specialists/traicie/TRAICIE_ROLE_DEFINITION_SPECIALIST/1_0.py @@ -143,8 +143,6 @@ class VacancyDefinitionFlow(EveAICrewAIFlow[VacancyDefFlowState]): # update["criteria"] = task.output.pydantic.criteria self.state.criteria = task.output.pydantic.criteria # crew_output.pydantic = crew_output.pydantic.model_copy(update=update) - current_app.logger.debug(f"State after execute_vac_def: {self.state}") - 
current_app.logger.debug(f"State dump after execute_vac_def: {self.state.model_dump()}") return crew_output except Exception as e: current_app.logger.error(f"CREW execute_vac_def Kickoff Error: {str(e)}") @@ -152,7 +150,6 @@ class VacancyDefinitionFlow(EveAICrewAIFlow[VacancyDefFlowState]): raise e async def kickoff_async(self, inputs=None): - current_app.logger.debug(f"Async kickoff {self.name}") self.state.input = VacancyDefinitionSpecialistInput.model_validate(inputs) result = await super().kickoff_async(inputs) return self.state diff --git a/eveai_chat_workers/specialists/traicie/TRAICIE_ROLE_DEFINITION_SPECIALIST/1_2.py b/eveai_chat_workers/specialists/traicie/TRAICIE_ROLE_DEFINITION_SPECIALIST/1_2.py index 7497ccd..bb383e2 100644 --- a/eveai_chat_workers/specialists/traicie/TRAICIE_ROLE_DEFINITION_SPECIALIST/1_2.py +++ b/eveai_chat_workers/specialists/traicie/TRAICIE_ROLE_DEFINITION_SPECIALIST/1_2.py @@ -168,20 +168,16 @@ class RoleDefinitionFlow(EveAICrewAIFlow[RoleDefFlowState]): async def execute_role_definition (self): inputs = self.state.input.model_dump() try: - current_app.logger.debug("In execute_role_definition") crew_output = await self.role_definition_crew.kickoff_async(inputs=inputs) # Unfortunately, crew_output will only contain the output of the latest task. # As we will only take into account the flow state, we need to ensure both competencies and criteria # are copies to the flow state. update = {} for task in self.role_definition_crew.tasks: - current_app.logger.debug(f"Task {task.name} output:\n{task.output}") if task.name == "traicie_get_competencies_task": # update["competencies"] = task.output.pydantic.competencies self.state.competencies = task.output.pydantic.competencies # crew_output.pydantic = crew_output.pydantic.model_copy(update=update) - current_app.logger.debug(f"State after execute_role_definition: {self.state}") - current_app.logger.debug(f"State dump after execute_role_definition: {self.state.model_dump()}") return crew_output except Exception as e: current_app.logger.error(f"CREW execute_role_definition Kickoff Error: {str(e)}") @@ -189,9 +185,6 @@ class RoleDefinitionFlow(EveAICrewAIFlow[RoleDefFlowState]): raise e async def kickoff_async(self, inputs=None): - current_app.logger.debug(f"Async kickoff {self.name}") - current_app.logger.debug(f"Inputs: {inputs}") self.state.input = RoleDefinitionSpecialistInput.model_validate(inputs) - current_app.logger.debug(f"State: {self.state}") result = await super().kickoff_async(inputs) return self.state diff --git a/eveai_chat_workers/specialists/traicie/TRAICIE_ROLE_DEFINITION_SPECIALIST/1_3.py b/eveai_chat_workers/specialists/traicie/TRAICIE_ROLE_DEFINITION_SPECIALIST/1_3.py index 214ef6f..ba8a0d0 100644 --- a/eveai_chat_workers/specialists/traicie/TRAICIE_ROLE_DEFINITION_SPECIALIST/1_3.py +++ b/eveai_chat_workers/specialists/traicie/TRAICIE_ROLE_DEFINITION_SPECIALIST/1_3.py @@ -174,20 +174,16 @@ class RoleDefinitionFlow(EveAICrewAIFlow[RoleDefFlowState]): async def execute_role_definition (self): inputs = self.state.input.model_dump() try: - current_app.logger.debug("In execute_role_definition") crew_output = await self.role_definition_crew.kickoff_async(inputs=inputs) # Unfortunately, crew_output will only contain the output of the latest task. # As we will only take into account the flow state, we need to ensure both competencies and criteria # are copies to the flow state. 
update = {} for task in self.role_definition_crew.tasks: - current_app.logger.debug(f"Task {task.name} output:\n{task.output}") if task.name == "traicie_get_competencies_task": # update["competencies"] = task.output.pydantic.competencies self.state.competencies = task.output.pydantic.competencies # crew_output.pydantic = crew_output.pydantic.model_copy(update=update) - current_app.logger.debug(f"State after execute_role_definition: {self.state}") - current_app.logger.debug(f"State dump after execute_role_definition: {self.state.model_dump()}") return crew_output except Exception as e: current_app.logger.error(f"CREW execute_role_definition Kickoff Error: {str(e)}") @@ -195,9 +191,6 @@ class RoleDefinitionFlow(EveAICrewAIFlow[RoleDefFlowState]): raise e async def kickoff_async(self, inputs=None): - current_app.logger.debug(f"Async kickoff {self.name}") - current_app.logger.debug(f"Inputs: {inputs}") self.state.input = RoleDefinitionSpecialistInput.model_validate(inputs) - current_app.logger.debug(f"State: {self.state}") result = await super().kickoff_async(inputs) return self.state diff --git a/eveai_chat_workers/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1_4.py b/eveai_chat_workers/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1_4.py index f5089dd..88a56d5 100644 --- a/eveai_chat_workers/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1_4.py +++ b/eveai_chat_workers/specialists/traicie/TRAICIE_SELECTION_SPECIALIST/1_4.py @@ -61,6 +61,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): # Load the Tenant & set language self.tenant = Tenant.query.get_or_404(tenant_id) + self.specialist_phase = "initial" @property def type(self) -> str: @@ -106,19 +107,13 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult: self.log_tuning("Traicie Selection Specialist execution started", {}) - current_app.logger.debug(f"Arguments: {arguments.model_dump()}") - current_app.logger.debug(f"Formatted Context: {formatted_context}") - current_app.logger.debug(f"Formatted History: {self._formatted_history}") - current_app.logger.debug(f"Cached Chat Session: {self._cached_session}") - if not self._cached_session.interactions: - specialist_phase = "initial" + self.specialist_phase = "initial" else: - specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial') + self.specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial') results = None - current_app.logger.debug(f"Specialist phase: {specialist_phase}") - match specialist_phase: + match self.specialist_phase: case "initial": results = self.execute_initial_state(arguments, formatted_context, citations) case "start_selection_procedure": @@ -149,16 +144,21 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor): interaction_mode = arguments.interaction_mode if not interaction_mode: interaction_mode = "selection" - current_app.logger.debug(f"Interaction mode: {interaction_mode}") - welcome_message = self.specialist.configuration.get("welcome_message", "Welcome to our selection process.") welcome_message = TranslationServices.translate(self.tenant_id, welcome_message, arguments.language) if interaction_mode == "selection": return self.execute_start_selection_procedure_state(arguments, formatted_context, citations, welcome_message) - else: # We are in orientation mode, so we perform standard rag - return self.execute_rag_state(arguments, formatted_context, citations, 
+        self.specialist_phase = "initial"
 
     @property
     def type(self) -> str:
@@ -106,19 +107,13 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
     def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
         self.log_tuning("Traicie Selection Specialist execution started", {})
 
-        current_app.logger.debug(f"Arguments: {arguments.model_dump()}")
-        current_app.logger.debug(f"Formatted Context: {formatted_context}")
-        current_app.logger.debug(f"Formatted History: {self._formatted_history}")
-        current_app.logger.debug(f"Cached Chat Session: {self._cached_session}")
-
         if not self._cached_session.interactions:
-            specialist_phase = "initial"
+            self.specialist_phase = "initial"
         else:
-            specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')
+            self.specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')
 
         results = None
-        current_app.logger.debug(f"Specialist phase: {specialist_phase}")
-        match specialist_phase:
+        match self.specialist_phase:
             case "initial":
                 results = self.execute_initial_state(arguments, formatted_context, citations)
             case "start_selection_procedure":
@@ -149,16 +144,21 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
         interaction_mode = arguments.interaction_mode
         if not interaction_mode:
             interaction_mode = "selection"
-        current_app.logger.debug(f"Interaction mode: {interaction_mode}")
-
         welcome_message = self.specialist.configuration.get("welcome_message", "Welcome to our selection process.")
         welcome_message = TranslationServices.translate(self.tenant_id, welcome_message, arguments.language)
 
         if interaction_mode == "selection":
             return self.execute_start_selection_procedure_state(arguments, formatted_context, citations,
                                                                 welcome_message)
-        else:  # We are in orientation mode, so we perform standard rag
-            return self.execute_rag_state(arguments, formatted_context, citations, welcome_message)
+        # We are in orientation mode, so we give a standard message, and move to rag state
+        start_selection_question = TranslationServices.translate(self.tenant_id, START_SELECTION_QUESTION,
+                                                                 arguments.language)
+        self.flow.state.answer = f"{welcome_message}\n\n{start_selection_question}"
+        self.flow.state.phase = "rag"
+
+        results = SelectionResult.create_for_type(self.type, self.type_version)
+
+        return results
 
     def execute_start_selection_procedure_state(self, arguments: SpecialistArguments, formatted_context, citations,
                                                 start_message=None) -> SpecialistResult:
@@ -172,7 +172,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
         ko_questions = self._get_ko_questions()
         fields = {}
         for ko_question in ko_questions.ko_questions:
-            current_app.logger.debug(f"KO Question: {ko_question}")
             fields[ko_question.title] = {
                 "name": ko_question.title,
                 "description": ko_question.title,
@@ -213,11 +212,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
         if not arguments.form_values:
             raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id,
                                                 "No form values returned")
-        current_app.logger.debug(f"Form values: {arguments.form_values}")
 
         # Load the previous KO Questions
         previous_ko_questions = self._get_ko_questions().ko_questions
-        current_app.logger.debug(f"Previous KO Questions: {previous_ko_questions}")
 
         # Evaluate KO Criteria
         evaluation = "positive"
@@ -355,39 +352,37 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
         results = SelectionResult.create_for_type(self.type, self.type_version,)
         return results
 
-    def execute_rag_state(self, arguments: SpecialistArguments, formatted_context, citations, welcome_message=None) \
+    def execute_rag_state(self, arguments: SpecialistArguments, formatted_context, citations) \
             -> SpecialistResult:
         self.log_tuning("Traicie Selection Specialist rag_state started", {})
 
         start_selection_question = TranslationServices.translate(self.tenant_id, START_SELECTION_QUESTION,
                                                                  arguments.language)
-        if welcome_message:
-            answer = f"{welcome_message}\n\n{start_selection_question}"
-        else:
-            answer = ""
-        rag_results = None
-        if arguments.question:
-            if HumanAnswerServices.check_additional_information(self.tenant_id,
-                                                                START_SELECTION_QUESTION,
-                                                                arguments.question,
-                                                                arguments.language):
-                rag_results = self.execute_rag(arguments, formatted_context, citations)
-                self.flow.state.rag_output = rag_results.rag_output
-                answer = f"{answer}\n{rag_results.answer}"
+        rag_output = None
 
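+        # First answer any additional question via RAG, then separately check
+        # whether the candidate answered affirmatively to start the selection.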
-        if HumanAnswerServices.check_affirmative_answer(self.tenant_id,
+        if HumanAnswerServices.check_additional_information(self.tenant_id,
                                                         START_SELECTION_QUESTION,
                                                         arguments.question,
                                                         arguments.language):
-            return self.execute_start_selection_procedure_state(arguments, formatted_context, citations, answer)
+            rag_output = self.execute_rag(arguments, formatted_context, citations)
+            self.flow.state.rag_output = rag_output
+            answer = rag_output.answer
+        else:
+            answer = ""
 
-        self.flow.state.answer = answer
-        self.flow.state.phase = "rag"
-        self.flow.state.form_request = None
+        if HumanAnswerServices.check_affirmative_answer(self.tenant_id,
+                                                        START_SELECTION_QUESTION,
+                                                        arguments.question,
+                                                        arguments.language):
+            return self.execute_start_selection_procedure_state(arguments, formatted_context, citations, answer)
+        else:
+            self.flow.state.answer = f"{answer}\n\n{start_selection_question}"
+            self.flow.state.phase = "rag"
+            self.flow.state.form_request = None
 
-        results = SelectionResult.create_for_type(self.type, self.type_version,)
-        return results
+        results = SelectionResult.create_for_type(self.type, self.type_version,)
+        return results
 
     def execute_rag(self, arguments: SpecialistArguments, formatted_context, citations) -> RAGOutput:
         self.log_tuning("RAG Specialist execution started", {})
@@ -395,6 +390,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
 
         insufficient_info_message = TranslationServices.translate(self.tenant_id, INSUFFICIENT_INFORMATION_MESSAGE,
                                                                   arguments.language)
+
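+        # Retrieve fresh context for the current question and keep the citations
+        # on the flow state (see the new SelectionFlowState.citations field).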
+        formatted_context, citations = self._retrieve_context(arguments)
+        self.flow.state.citations = citations
         if formatted_context:
             flow_inputs = {
                 "language": arguments.language,
@@ -403,9 +401,11 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
                 "history": self.formatted_history,
                 "name": self.specialist.configuration.get('name', ''),
             }
-            rag_output = self.flow.kickoff(inputs=flow_inputs)
-            if rag_output.rag_output.insufficient_info:
-                rag_output.rag_output.answer = insufficient_info_message
+            flow_results = self.flow.kickoff(inputs=flow_inputs)
+            if flow_results.rag_output.insufficient_info:
+                flow_results.rag_output.answer = insufficient_info_message
+
+            rag_output = flow_results.rag_output
         else:
             rag_output = RAGOutput(answer=insufficient_info_message, insufficient_info=True)
 
@@ -418,8 +418,11 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
                                                    START_SELECTION_QUESTION,
                                                    arguments.question,
                                                    arguments.language):
-            results = self.execute_rag(arguments, formatted_context, citations)
-            return results
+            rag_output = self.execute_rag(arguments, formatted_context, citations)
+
+            self.flow.state.rag_output = rag_output
+
+            return rag_output
         else:
             return None
 
@@ -439,7 +442,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
         ko_questions_data = minio_client.download_asset_file(self.tenant_id, ko_questions_asset.bucket_name,
                                                              ko_questions_asset.object_name)
         ko_questions = KOQuestions.from_json(ko_questions_data)
-        current_app.logger.debug(f"KO Questions: {ko_questions}")
 
         return ko_questions
 
@@ -470,8 +472,8 @@ class SelectionInput(BaseModel):
     history: Optional[str] = Field(None, alias="history")
     name: Optional[str] = Field(None, alias="name")
     # Selection elements
-    region: str = Field(..., alias="region")
-    working_schedule: Optional[str] = Field(..., alias="working_schedule")
+    region: Optional[str] = Field(None, alias="region")
+    working_schedule: Optional[str] = Field(None, alias="working_schedule")
     start_date: Optional[date] = Field(None, alias="vacancy_text")
     interaction_mode: Optional[str] = Field(None, alias="interaction_mode")
     tone_of_voice: Optional[str] = Field(None, alias="tone_of_voice")
@@ -489,6 +491,7 @@ class SelectionFlowState(EveAIFlowState):
     ko_criteria_answers: Optional[Dict[str, str]] = None
     personal_contact_data: Optional[PersonalContactData] = None
     contact_time: Optional[str] = None
+    citations: Optional[List[Dict[str, Any]]] = None
 
 
 class SelectionResult(SpecialistResult):
@@ -530,7 +533,6 @@ class SelectionFlow(EveAICrewAIFlow[SelectionFlowState]):
         raise e
 
     async def kickoff_async(self, inputs=None):
-        current_app.logger.debug(f"Async kickoff {self.name}")
         self.state.input = SelectionInput.model_validate(inputs)
         result = await super().kickoff_async(inputs)
         return self.state
diff --git a/eveai_workers/tasks.py b/eveai_workers/tasks.py
index f92d71d..89ddf95 100644
--- a/eveai_workers/tasks.py
+++ b/eveai_workers/tasks.py
@@ -586,7 +586,6 @@ def combine_chunks_for_markdown(potential_chunks, min_chars, max_chars, processo
         # Force new chunk if pattern matches
         if chunking_patterns and matches_chunking_pattern(chunk, chunking_patterns):
             if current_chunk and current_length >= min_chars:
-                current_app.logger.debug(f"Chunk Length of chunk to embed: {len(current_chunk)} ")
                 actual_chunks.append(current_chunk)
                 current_chunk = chunk
                 current_length = chunk_length
@@ -594,7 +593,6 @@
 
         if current_length + chunk_length > max_chars:
             if current_length >= min_chars:
-                current_app.logger.debug(f"Chunk Length of chunk to embed: {len(current_chunk)} ")
                 actual_chunks.append(current_chunk)
                 current_chunk = chunk
                 current_length = chunk_length
@@ -608,7 +606,6 @@ def combine_chunks_for_markdown(potential_chunks, min_chars, max_chars, processo
 
     # Handle the last chunk
     if current_chunk and current_length >= 0:
-        current_app.logger.debug(f"Chunk Length of chunk to embed: {len(current_chunk)} ")
         actual_chunks.append(current_chunk)
 
     return actual_chunks
@@ -630,7 +627,6 @@ def get_processor_for_document(catalog_id: int, file_type: str, sub_file_type: s
         ValueError: If no matching processor is found
     """
     try:
-        current_app.logger.debug(f"Getting processor for catalog {catalog_id}, file type {file_type}, file sub_type {sub_file_type} ")
         # Start with base query for catalog
         query = Processor.query.filter_by(catalog_id=catalog_id).filter_by(active=True)
 
@@ -647,7 +643,6 @@ def get_processor_for_document(catalog_id: int, file_type: str, sub_file_type: s
         if not available_processors:
             raise EveAINoProcessorFound(catalog_id, file_type, sub_file_type)
         available_processor_types = [processor.type for processor in available_processors]
-        current_app.logger.debug(f"Available processors for catalog {catalog_id}: {available_processor_types}")
 
         # Find processor type that handles this file type
         matching_processor_type = None
@@ -657,17 +652,13 @@ def get_processor_for_document(catalog_id: int, file_type: str, sub_file_type: s
             supported_types = config['file_types']
             if isinstance(supported_types, str):
                 supported_types = [t.strip() for t in supported_types.split(',')]
-            current_app.logger.debug(f"Supported types for processor type {proc_type}: {supported_types}")
             if file_type in supported_types:
                 matching_processor_type = proc_type
                 break
-        current_app.logger.debug(f"Processor type found for catalog {catalog_id}, file type {file_type}: {matching_processor_type}")
 
         if not matching_processor_type:
             raise EveAINoProcessorFound(catalog_id, file_type, sub_file_type)
-        else:
-            current_app.logger.debug(f"Processor type found for file type: {file_type}: {matching_processor_type}")
 
         processor = None
         for proc in available_processors:
@@ -678,7 +669,6 @@ def get_processor_for_document(catalog_id: int, file_type: str, sub_file_type: s
 
         if not processor:
             raise EveAINoProcessorFound(catalog_id, file_type, sub_file_type)
-        current_app.logger.debug(f"Processor found for catalog {catalog_id}, file type {file_type}: {processor}")
 
         return processor
     except Exception as e:
diff --git a/requirements.txt b/requirements.txt
index ebb187e..001fffd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -82,7 +82,7 @@ typing_extensions~=4.12.2
 babel~=2.16.0
 dogpile.cache~=1.3.3
 python-docx~=1.1.2
-crewai~=0.121.0
+crewai~=0.140.0
 sseclient~=0.0.27
 termcolor~=2.5.0
 mistral-common~=1.5.5