- Refinement of the chat client to have better visible clues for user vs chatbot messages

- Introduction of interview_phase and normal phase in TRAICIE_SELECTION_SPECIALIST to make interaction with bot more human.
- More and random humanised messages to TRAICIE_SELECTION_SPECIALIST
This commit is contained in:
Josako
2025-08-02 16:36:41 +02:00
parent 998ddf4c03
commit 9a88582fff
50 changed files with 2064 additions and 384 deletions

View File

@@ -31,11 +31,12 @@ def get_default_chat_customisation(tenant_customisation=None):
'progress_tracker_insights': 'No Information', 'progress_tracker_insights': 'No Information',
'form_title_display': 'Full Title', 'form_title_display': 'Full Title',
'active_background_color': '#ffffff', 'active_background_color': '#ffffff',
'active_text_color': '#212529',
'history_background': 10, 'history_background': 10,
'history_user_message_background': -10, 'ai_message_background': '#ffffff',
'history_ai_message_background': 0, 'ai_message_text_color': '#212529',
'history_message_text_color': '#212529', 'human_message_background': '#212529',
'human_message_text_color': '#ffffff',
} }
# If no tenant customization is provided, return the defaults # If no tenant customization is provided, return the defaults

View File

@@ -57,9 +57,7 @@ def replace_variable_in_template(template: str, variable: str, value: str) -> st
str: Template with variable placeholder replaced str: Template with variable placeholder replaced
""" """
current_app.logger.info(f"Replacing variable {variable} with value {value}")
modified_template = template.replace(f"{{{variable}}}", value or "") modified_template = template.replace(f"{{{variable}}}", value or "")
current_app.logger.info(f"Modified template: {modified_template}")
return modified_template return modified_template

View File

@@ -15,7 +15,7 @@ backstory: >
include a salutation or closing greeting in your answer. include a salutation or closing greeting in your answer.
{custom_backstory} {custom_backstory}
full_model_name: "mistral.mistral-medium-latest" full_model_name: "mistral.mistral-medium-latest"
temperature: 0.3 temperature: 0.5
metadata: metadata:
author: "Josako" author: "Josako"
date_added: "2025-01-08" date_added: "2025-01-08"

View File

@@ -3,7 +3,7 @@ name: "Role Definition Catalog"
description: "A Catalog containing information specific to a specific role" description: "A Catalog containing information specific to a specific role"
configuration: configuration:
tagging_fields: tagging_fields:
role_identification: role_reference:
type: "string" type: "string"
required: true required: true
description: "A unique identification for the role" description: "A unique identification for the role"

View File

@@ -55,11 +55,6 @@ configuration:
description: "Primary Color" description: "Primary Color"
type: "color" type: "color"
required: false required: false
active_text_color:
name: "Active Interaction Text Color"
description: "Secondary Color"
type: "color"
required: false
history_background: history_background:
name: "History Background" name: "History Background"
description: "Percentage to lighten (+) / darken (-) the user message background" description: "Percentage to lighten (+) / darken (-) the user message background"
@@ -67,27 +62,28 @@ configuration:
min_value: -50 min_value: -50
max_value: 50 max_value: 50
required: false required: false
history_user_message_background: ai_message_background:
name: "History User Message Background" name: "AI (Bot) Message Background Color"
description: "Percentage to lighten (+) / darken (-) the user message background" description: "AI (Bot) Message Background Color"
type: "integer" type: "color"
min_value: -50
max_value: 50
required: false required: false
history_ai_message_background: ai_message_text_color:
name: "History AI Message Background" name: "AI (Bot) Message Text Color"
description: "Percentage to lighten (+) / darken (-) the AI message background" description: "AI (Bot) Message Text Color"
type: "integer" type: "color"
min_value: -50
max_value: 50
required: false required: false
history_message_text_color: human_message_background:
name: "History Text Color" name: "Human Message Background Color"
description: "History Message Text Color" description: "Human Message Background Color"
type: "color"
required: false
human_message_text_color:
name: "Human Message Text Color"
description: "Human Message Text Color"
type: "color" type: "color"
required: false required: false
metadata: metadata:
author: "Josako" author: "Josako"
date_added: "2024-06-06" date_added: "2024-06-06"
changes: "Initial version" changes: "Adaptations to make color choosing more consistent and user friendly"
description: "Parameters allowing to customise the chat client" description: "Parameters allowing to customise the chat client"

View File

@@ -42,7 +42,7 @@ configuration:
image_handling: image_handling:
name: "Image Handling" name: "Image Handling"
type: "enum" type: "enum"
description: "How to handle embedded images" description: "How to handle embedded img"
required: false required: false
default: "skip" default: "skip"
allowed_values: ["skip", "extract", "placeholder"] allowed_values: ["skip", "extract", "placeholder"]

View File

@@ -14,8 +14,8 @@ configuration:
required: true required: true
default: 0.3 default: 0.3
arguments: arguments:
role_identification: role_reference:
name: "Role Identification" name: "Role Reference"
type: "string" type: "string"
description: "The role information needs to be retrieved for" description: "The role information needs to be retrieved for"
required: true required: true

View File

@@ -19,8 +19,8 @@ arguments:
type: "string" type: "string"
description: "Query to retrieve embeddings" description: "Query to retrieve embeddings"
required: true required: true
role_identification: role_reference:
name: "Role Identification" name: "Role Reference"
type: "string" type: "string"
description: "The role information needs to be retrieved for" description: "The role information needs to be retrieved for"
required: true required: true

View File

@@ -0,0 +1,31 @@
type: "PERSONAL_CONTACT_FORM"
version: "1.0.0"
name: "Personal Contact Form"
icon: "person"
fields:
name:
name: "Name"
description: "Your name"
type: "str"
required: true
# It is possible to also add a field 'context'. It allows you to provide an elaborate piece of information.
email:
name: "Email"
type: "str"
description: "Your Email"
required: true
phone:
name: "Phone Number"
type: "str"
description: "Your Phone Number"
required: true
consent:
name: "Consent"
type: "boolean"
description: "Consent"
required: true
metadata:
author: "Josako"
date_added: "2025-07-29"
changes: "Initial Version"
description: "Personal Contact Form"

View File

@@ -0,0 +1,29 @@
version: "1.1.0"
name: "Traicie KO Criteria Interview Definition Specialist"
framework: "crewai"
partner: "traicie"
chat: false
configuration:
arguments:
specialist_id:
name: "specialist_id"
description: "ID of the specialist for which to define KO Criteria Questions and Answers"
type: "integer"
required: true
results:
asset_id:
name: "asset_id"
description: "ID of the Asset containing questions and answers for each of the defined KO Criteria"
type: "integer"
required: true
agents:
- type: "TRAICIE_HR_BP_AGENT"
version: "1.0"
tasks:
- type: "TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK"
version: "1.0"
metadata:
author: "Josako"
date_added: "2025-07-01"
changes: "Initial Version"
description: "Specialist assisting in questions and answers definition for KO Criteria"

View File

@@ -0,0 +1,121 @@
version: "1.4.0"
name: "Traicie Selection Specialist"
framework: "crewai"
partner: "traicie"
chat: true
configuration:
name:
name: "Name"
description: "The name the specialist is called upon."
type: "str"
required: true
role_reference:
name: "Role Reference"
description: "A customer reference to the role"
type: "str"
required: false
make:
name: "Make"
description: "The make for which the role is defined and the selection specialist is created"
type: "system"
system_name: "tenant_make"
required: true
competencies:
name: "Competencies"
description: "An ordered list of competencies."
type: "ordered_list"
list_type: "competency_details"
required: true
tone_of_voice:
name: "Tone of Voice"
description: "The tone of voice the specialist uses to communicate"
type: "enum"
allowed_values: ["Professional & Neutral", "Warm & Empathetic", "Energetic & Enthusiastic", "Accessible & Informal", "Expert & Trustworthy", "No-nonsense & Goal-driven"]
default: "Professional & Neutral"
required: true
language_level:
name: "Language Level"
description: "Language level to be used when communicating, relating to CEFR levels"
type: "enum"
allowed_values: ["Basic", "Standard", "Professional"]
default: "Standard"
required: true
welcome_message:
name: "Welcome Message"
description: "Introductory text given by the specialist - but translated according to Tone of Voice, Language Level and Starting Language"
type: "text"
required: false
competency_details:
title:
name: "Title"
description: "Competency Title"
type: "str"
required: true
description:
name: "Description"
description: "Description (in context of the role) of the competency"
type: "text"
required: true
is_knockout:
name: "KO"
description: "Defines if the competency is a knock-out criterium"
type: "boolean"
required: true
default: false
assess:
name: "Assess"
description: "Indication if this competency is to be assessed"
type: "boolean"
required: true
default: true
arguments:
region:
name: "Region"
type: "str"
description: "The region of the specific vacancy"
required: false
working_schedule:
name: "Work Schedule"
type: "str"
description: "The work schedule or employment type of the specific vacancy"
required: false
start_date:
name: "Start Date"
type: "date"
description: "The start date of the specific vacancy"
required: false
language:
name: "Language"
type: "str"
description: "The language (2-letter code) used to start the conversation"
required: true
interaction_mode:
name: "Interaction Mode"
type: "enum"
description: "The interaction mode the specialist will start working in."
allowed_values: ["orientation", "selection"]
default: "orientation"
required: true
results:
competencies:
name: "competencies"
type: "List[str, str]"
description: "List of vacancy competencies and their descriptions"
required: false
agents:
- type: "TRAICIE_RECRUITER_AGENT"
version: "1.0"
- type: "RAG_AGENT"
version: "1.1"
tasks:
- type: "TRAICIE_DETERMINE_INTERVIEW_MODE_TASK"
version: "1.0"
- type: "TRAICIE_AFFIRMATIVE_ANSWER_CHECK_TASK"
version: "1.0"
- type: "ADVANCED_RAG_TASK"
version: "1.0"
metadata:
author: "Josako"
date_added: "2025-07-30"
changes: "Update for a Full Virtual Assistant Experience"
description: "Assistant to assist in candidate selection"

View File

@@ -0,0 +1,43 @@
version: "1.0.0"
name: "Advanced RAG Task"
task_description: >
Answer the following question (in between triple £):
£££{question}£££
Base your answer on the following context (in between triple $):
$$${context}$$$
Take into account the following history of the conversation (in between triple €):
€€€{history}€€€
The HUMAN parts indicate the interactions by the end user, the AI parts are your interactions.
Best Practices are:
- Answer the provided question as precisely and directly as you can, combining elements of the provided context.
- Always focus your answer on the actual question.
- Limit repetition in your answers to an absolute minimum, unless absolutely necessary.
- Always be friendly and helpful for the end user.
Tune your answers to the following:
- You use the following Tone of Voice for your answer: {tone_of_voice}, i.e. {tone_of_voice_context}
- You use the following Language Level for your answer: {language_level}, i.e. {language_level_context}
Use the following language in your communication: {language}
If the question cannot be answered using the given context, answer "I have insufficient information to answer this
question." and give the appropriate indication.
{custom_description}
expected_output: >
metadata:
author: "Josako"
date_added: "2025-07-30"
description: "A Task that performs RAG and checks for human answers"
changes: "Initial version"

View File

@@ -1,19 +1,32 @@
version: "1.0.0" version: "1.0.0"
name: "RAG Task" name: "RAG Task"
task_description: > task_description: >
Answer the question based on the following context, and taking into account the history of the discussion. Try not to Answer the following question (in between triple £):
repeat answers already given in the recent history, unless confirmation is required or repetition is essential to
give a coherent answer. £££{question}£££
Base your answer on the following context (in between triple $):
$$${context}$$$
Take into account the following history of the conversation (in between triple €):
€€€{history}€€€
The HUMAN parts indicate the interactions by the end user, the AI parts are your interactions.
Best Practices are:
- Answer the provided question as precisely and directly as you can, combining elements of the provided context.
- Always focus your answer on the actual HUMAN question.
- Try not to repeat your answers (preceded by AI), unless absolutely necessary.
- Focus your answer on the question at hand.
- Always be friendly and helpful for the end user.
{custom_description} {custom_description}
Use the following {language} in your communication, and cite the sources used at the end of the full conversation. Use the following {language} in your communication.
If the question cannot be answered using the given context, answer "I have insufficient information to answer this If the question cannot be answered using the given context, answer "I have insufficient information to answer this
question." question." and give the appropriate indication.
Context (in between triple $):
$$${context}$$$
History (in between triple €):
€€€{history}€€€
Question (in between triple £):
£££{question}£££
expected_output: > expected_output: >
Your answer. Your answer.
metadata: metadata:

View File

@@ -0,0 +1,29 @@
version: "1.0.0"
name: "Traicie Affirmative Answer Check"
task_description: >
You are provided with the following end user answer (in between triple £):
£££{question}£££
This is the history of the conversation (in between triple €):
€€€{history}€€€
(In this history, user interactions are preceded by 'HUMAN', and your interactions with 'AI'.)
Check if the user has given an affirmative answer or not.
Please note that this answer can be very short:
- Affirmative answers: e.g. Yes, OK, Sure, Of Course
- Negative answers: e.g. No, not really, I'd rather not.
Please consider that the answer will be given in {language}!
{custom_description}
expected_output: >
Your determination if the answer was affirmative (true) or negative (false)
metadata:
author: "Josako"
date_added: "2025-07-30"
description: "A Task to check if the answer to a question is affirmative"
changes: "Initial version"

View File

@@ -1,30 +0,0 @@
version: "1.0.0"
name: "KO Criteria Interview Definition"
task_description: >
In context of a vacancy in your company {tenant_name}, you are provided with a set of competencies. (both description
and title). The competencies are in between triple backquotes. You need to prepare for the interviews,
and are to provide for each of these ko criteria:
- A question to ask the recruitment candidate describing the context of the competency. Use your experience to not
just ask a closed question, but a question from which you can indirectly derive a positive or negative qualification of
the competency based on the answer of the candidate.
Apply the following tone of voice in both questions and answers: {tone_of_voice}
Apply the following language level in both questions and answers: {language_level}
Respect the language of the competencies, and return all output in the same language.
```{competencies}```
{custom_description}
expected_output: >
For each of the ko criteria, you provide:
- the exact title in the original language
- the question
- a set of answers, with for each answer an indication if it is the correct answer, or a false response.
{custom_expected_output}
metadata:
author: "Josako"
date_added: "2025-06-15"
description: "A Task to define interview Q&A from given KO Criteria"
changes: "Initial Version"

View File

@@ -0,0 +1,23 @@
version: "1.0.0"
name: "Traicie Determine Interview Mode"
task_description: >
You are provided with the following user input (in between triple backquotes):
```{question}```
If this user input contains one or more questions, your answer is simply 'RAG'. In all other cases, your answer is
'CHECK'.
Best practices to be applied:
- A question doesn't always have an ending question mark. It can be a query for more information, such as 'I'd like
to understand ...', 'I'd like to know more about...'. Or it is possible the user didn't enter a question mark. Take
into account the user might be working on a mobile device like a phone, making typing not as obvious.
- If there is a question mark, then normally you are provided with a question of course.
expected_output: >
Your Answer.
metadata:
author: "Josako"
date_added: "2025-07-30"
description: "A Task to determine the interview mode based on the last user input"
changes: "Initial version"

View File

@@ -12,4 +12,8 @@ SPECIALIST_FORM_TYPES = {
"name": "Contact Time Preferences Form", "name": "Contact Time Preferences Form",
"description": "A form for entering contact time preferences", "description": "A form for entering contact time preferences",
}, },
"MINIMAL_PERSONAL_CONTACT_FORM": {
"name": "Personal Contact Form",
"description": "A form for entering your personal contact details",
}
} }

View File

@@ -37,14 +37,23 @@ TASK_TYPES = {
"description": "A Task to get Competencies from a Vacancy Text", "description": "A Task to get Competencies from a Vacancy Text",
"partner": "traicie" "partner": "traicie"
}, },
"TRAICIE_GET_KO_CRITERIA_TASK": {
"name": "Traicie Get KO Criteria",
"description": "A Task to get KO Criteria from a Vacancy Text",
"partner": "traicie"
},
"TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK": { "TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK": {
"name": "Traicie KO Criteria Interview Definition", "name": "Traicie KO Criteria Interview Definition",
"description": "A Task to define KO Criteria questions to be used during the interview", "description": "A Task to define KO Criteria questions to be used during the interview",
"partner": "traicie" "partner": "traicie"
},
"TRAICIE_ADVANCED_RAG_TASK": {
"name": "Traicie Advanced RAG",
"description": "A Task to perform Advanced RAG taking into account previous questions, tone of voice and language level",
"partner": "traicie"
},
"TRAICIE_AFFIRMATIVE_ANSWER_CHECK_TASK": {
"name": "Traicie Affirmative Answer Check",
"description": "A Task to check if the answer to a question is affirmative",
"partner": "traicie"
},
"TRAICIE_DETERMINE_INTERVIEW_MODE_TASK": {
"name": "Traicie Determine Interview Mode",
"description": "A Task to determine the interview mode based on the last user input",
} }
} }

View File

@@ -3,6 +3,10 @@
cd /Volumes/OWC4M2_1/Development/Josako/EveAI/TBD/docker cd /Volumes/OWC4M2_1/Development/Josako/EveAI/TBD/docker
source ./docker_env_switch.sh dev source ./docker_env_switch.sh dev
echo "Copying client images"
cp -fv ../eveai_chat_client/static/assets/img/* ../nginx/static/assets/img
dcdown eveai_chat_client nginx dcdown eveai_chat_client nginx
cd ../nginx cd ../nginx

View File

@@ -31,10 +31,10 @@ fi
# Path to your docker-compose file # Path to your docker-compose file
DOCKER_COMPOSE_FILE="compose_dev.yaml" DOCKER_COMPOSE_FILE="compose_dev.yaml"
# Get all the images defined in docker-compose # Get all the img defined in docker-compose
IMAGES=$(docker compose -f $DOCKER_COMPOSE_FILE config | grep 'image:' | awk '{ print $2 }') IMAGES=$(docker compose -f $DOCKER_COMPOSE_FILE config | grep 'image:' | awk '{ print $2 }')
# Start tagging only relevant images # Start tagging only relevant img
for DOCKER_IMAGE in $IMAGES; do for DOCKER_IMAGE in $IMAGES; do
# Check if the image belongs to your Docker account and ends with :latest # Check if the image belongs to your Docker account and ends with :latest
if [[ $DOCKER_IMAGE == $DOCKER_ACCOUNT* && $DOCKER_IMAGE == *:latest ]]; then if [[ $DOCKER_IMAGE == $DOCKER_ACCOUNT* && $DOCKER_IMAGE == *:latest ]]; then

View File

@@ -108,7 +108,91 @@ const { loadIcon } = useFormIcon(() => props.formData);
1. **ChatInput.vue** - Uses `useIconManager()` composable 1. **ChatInput.vue** - Uses `useIconManager()` composable
2. **ChatMessage.vue** - Uses `useIconManager()` composable 2. **ChatMessage.vue** - Uses `useIconManager()` composable
3. **DynamicForm.vue** - Uses `useIconManager()` composable 3. **DynamicForm.vue** - Uses `useIconManager()` composable with boolean icon support
## 🔘 Boolean Value Display
### Overview
Boolean values in read-only DynamicForm components are automatically displayed using Material Icons instead of text for improved user experience.
### Icon Mapping
```javascript
const booleanIconMapping = {
true: 'check_circle', // Green checkmark icon
false: 'cancel' // Red cancel/cross icon
};
```
### Visual Styling
- **True values**: Green `check_circle` icon (#4caf50)
- **False values**: Red `cancel` icon (#f44336)
- **Size**: 20px font size with middle vertical alignment
- **Accessibility**: Includes `aria-label` and `title` attributes
### Usage Example
```vue
<!-- Form definition with boolean fields -->
<script>
export default {
data() {
return {
formData: {
title: 'User Settings',
fields: [
{ id: 'active', name: 'Actief', type: 'boolean' },
{ id: 'verified', name: 'Geverifieerd', type: 'boolean' }
]
},
formValues: {
active: true, // Will show green check_circle
verified: false // Will show red cancel
}
};
}
};
</script>
<!-- Read-only display -->
<dynamic-form
:form-data="formData"
:form-values="formValues"
:read-only="true"
api-prefix="/api"
/>
```
### Implementation Details
- **Automatic icon loading**: Boolean icons (`check_circle`, `cancel`) are preloaded when DynamicForm mounts
- **Read-only only**: Edit mode continues to use standard HTML checkboxes
- **Accessibility**: Each icon includes Dutch labels ('Ja'/'Nee') for screen readers
- **Responsive**: Icons scale appropriately with form styling
### CSS Classes
```css
.boolean-icon {
font-size: 20px;
vertical-align: middle;
}
.boolean-true {
color: #4caf50; /* Green for true */
}
.boolean-false {
color: #f44336; /* Red for false */
}
.field-value.boolean-value {
display: flex;
align-items: center;
}
```
### Zero Legacy Code Remaining ✅ ### Zero Legacy Code Remaining ✅

View File

@@ -79,23 +79,14 @@
} }
/* Chat Input styling */ /* Chat Input styling */
.chat-input-container {
width: 100%;
position: relative;
padding: 20px; /* Interne padding voor ChatInput */
box-sizing: border-box;
max-width: 1000px; /* Optimale breedte */
margin-left: auto;
margin-right: auto; /* Horizontaal centreren */
}
.chat-input { .chat-input {
display: flex; display: flex;
align-items: flex-end; align-items: flex-end;
gap: 12px; gap: 12px;
padding: 20px; padding: 20px;
background: var(--active-background-color); background: var(--human-message-background);
color: var(--active-text-color); color: var(--human-message-text-color);
border-radius: 15px; border-radius: 15px;
box-shadow: 0 2px 15px rgba(0,0,0,0.1); box-shadow: 0 2px 15px rgba(0,0,0,0.1);
border: 1px solid rgba(0,0,0,0.05); border: 1px solid rgba(0,0,0,0.05);
@@ -113,37 +104,6 @@
gap: 8px; gap: 8px;
} }
.send-btn {
width: 45px;
height: 45px;
border: none;
border-radius: 50%;
background: var(--active-background-color);
color: var(--active-text-color);
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
font-size: 18px;
transition: all 0.2s ease;
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
}
.send-btn:hover:not(:disabled) {
background: var(--active-text-color);
color: var(--active-background-color);
transform: scale(1.05);
box-shadow: 0 4px 15px rgba(0,0,0,0.2);
}
.send-btn:disabled {
background: #ccc;
cursor: not-allowed;
transform: none;
box-shadow: none;
}
/* Character counter */ /* Character counter */
.character-counter { .character-counter {
position: absolute; position: absolute;
@@ -152,7 +112,7 @@
font-size: 12px; font-size: 12px;
color: #666; color: #666;
padding: 2px 6px; padding: 2px 6px;
background: rgba(255,255,255,0.9); background: rgba(255,255,255,0.2);
border-radius: 10px; border-radius: 10px;
backdrop-filter: blur(5px); backdrop-filter: blur(5px);
} }
@@ -190,11 +150,6 @@
max-width: 100%; /* Op mobiel volledige breedte gebruiken */ max-width: 100%; /* Op mobiel volledige breedte gebruiken */
} }
.chat-input-container {
padding: 15px;
max-width: 100%; /* Op mobiel volledige breedte gebruiken */
}
.chat-input { .chat-input {
padding: 15px; padding: 15px;
gap: 10px; gap: 10px;
@@ -224,10 +179,6 @@
.message-history-container { .message-history-container {
padding: 12px; padding: 12px;
} }
.chat-input-container {
padding: 12px;
}
} }
/* Loading states */ /* Loading states */
@@ -343,16 +294,16 @@
/* User message bubble styling */ /* User message bubble styling */
.message.user .message-content { .message.user .message-content {
background: var(--history-user-message-background); background: var(--human-message-background);
color: var(--history-message-text-color); color: var(--human-message-text-color);
border-bottom-right-radius: 4px; border-bottom-right-radius: 4px;
} }
/* AI/Bot message bubble styling */ /* AI/Bot message bubble styling */
.message.ai .message-content, .message.ai .message-content,
.message.bot .message-content { .message.bot .message-content {
background: var(--history-ai-message-background); background: var(--ai-message-background);
color: var(--history-message-text-color); color: var(--ai-message-text-color);
border-bottom-left-radius: 4px; border-bottom-left-radius: 4px;
margin-right: 60px; margin-right: 60px;
} }

View File

@@ -1,14 +1,6 @@
/* ChatInput component styling */ /* ChatInput component styling */
/* Algemene container */ /* Algemene container */
.chat-input-container {
width: 100%;
padding: 10px;
background-color: #fff;
border-top: 1px solid #e0e0e0;
font-family: Arial, sans-serif;
font-size: 14px;
}
/* Input veld en knoppen */ /* Input veld en knoppen */
.chat-input { .chat-input {
@@ -42,38 +34,6 @@
gap: 8px; gap: 8px;
} }
/* Verzendknop */
.send-btn {
display: flex;
align-items: center;
justify-content: center;
width: 40px;
height: 40px;
background-color: #0084ff;
color: white;
border: none;
border-radius: 50%;
cursor: pointer;
transition: background-color 0.2s;
}
.send-btn:hover {
background-color: #0077e6;
}
.send-btn:disabled {
background-color: #ccc;
cursor: not-allowed;
}
.send-btn.form-mode {
background-color: #4caf50;
}
.send-btn.form-mode:hover {
background-color: #43a047;
}
/* Loading spinner */ /* Loading spinner */
.loading-spinner { .loading-spinner {
display: inline-block; display: inline-block;
@@ -85,3 +45,40 @@
100% { transform: rotate(360deg); } 100% { transform: rotate(360deg); }
} }
/* Form actions container */
.form-actions {
display: flex;
justify-content: flex-end;
align-items: center;
padding: 10px 0;
margin-top: 10px;
}
/* Form send actions container - for send button within form */
.form-send-actions {
display: flex;
justify-content: flex-end;
align-items: center;
padding: 10px 0;
margin-top: 10px;
}
/* Dynamic form container transitions */
.dynamic-form-container {
transition: opacity 0.3s ease-in-out, transform 0.3s ease-in-out;
transform: translateY(0);
opacity: 1;
}
/* Chat input transitions */
.chat-input {
transition: opacity 0.3s ease-in-out, transform 0.3s ease-in-out;
transform: translateY(0);
opacity: 1;
}
/* Smooth transitions for mode switching */
.chat-input-container > * {
transition: opacity 0.2s ease-in-out;
}

View File

@@ -149,3 +149,46 @@
font-family: Arial, sans-serif; font-family: Arial, sans-serif;
font-size: 14px; font-size: 14px;
} }
/* Ensure forms in messages use full available width */
.message .dynamic-form-container {
width: 100%;
max-width: none;
}
.message .dynamic-form {
width: 100%;
}
.message .form-fields {
width: 100%;
}
/* Optimize form field layout in messages to prevent unnecessary label wrapping */
.message .form-field {
display: grid;
grid-template-columns: 30% 70%;
gap: 12px;
align-items: start;
width: 100%;
}
/* Ensure form field inputs use full available space */
.message .form-field input,
.message .form-field select,
.message .form-field textarea {
width: 100%;
box-sizing: border-box;
}
/* Special handling for radio fields in messages */
.message .form-field.radio-field {
display: block;
width: 100%;
}
.message .form-field.radio-field .field-label {
display: block;
margin-bottom: 8px;
width: 100%;
}

View File

@@ -124,3 +124,21 @@
.text-value { .text-value {
white-space: pre-wrap; white-space: pre-wrap;
} }
/* Loading spinner for send button */
.form-actions .loading-spinner {
display: inline-block;
animation: spin 1s linear infinite;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
/* Flexbox layout for single send button */
.form-actions.with-send-button {
display: flex;
justify-content: flex-end;
align-items: center;
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.6 KiB

View File

@@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg id="Laag_1" data-name="Laag 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 360 360">
<defs>
<style>
.cls-1 {
fill: #99498f;
}
.cls-2 {
fill: #9c8ae1;
}
.cls-3 {
fill: #eb7f31;
}
</style>
</defs>
<path class="cls-1" d="M303.75,187.38c-.39,21.63-23.09,71.46-44.05,56.75-7.01-4.92-9.52-14.14-6.4-22.11,46.36-118.47-110.59-153.63-165.22-81.96-3.2,4.19-7.98,6.92-13.23,7.39-29.86,2.67-11.81-28.09,9.75-54.84.31-.38.63-.75.96-1.11,77.63-83.29,223.16-15.54,218.2,95.89Z"/>
<path class="cls-3" d="M309.51,261.49c.78,11.39-3.07,22.02-9.91,29.99-6.85,7.97-16.7,13.29-27.94,14.06-5.31.36-10.46-.32-15.24-1.88-6.99-2.28-8.45-11.54-2.54-15.91,5.59-4.13,10.81-8.72,15.62-13.73,1.58-1.64,3.12-3.33,4.61-5.07,1.49-1.73,2.94-3.51,4.33-5.33,2.91-3.79,5.6-7.75,8.07-11.88,1.06-1.77,2.07-3.57,3.05-5.39,3.49-6.55,13.05-6.36,16.17.37,2.1,4.52,3.42,9.49,3.78,14.77Z"/>
<path class="cls-2" d="M248.52,246.01c-17.19-10.96-6.79-24.59-5.31-32.5,21.64-115.53-112.72-111.33-150.58-59.62-5.74,7.84-24.59,5.08-25.04,5.03-8.37-.78-11.67,1.66-14,10.54-27.79,106.04,132.76,184.42,198.64,101.39,6.27-7.9,4.79-19.42-3.71-24.84ZM95.86,233.32c-29.22.43-29.22-45.47,0-45.04,29.21-.43,29.21,45.47,0,45.04ZM189.87,233.32c-29.21.43-29.21-45.47,0-45.04,29.22-.43,29.22,45.47,0,45.04Z"/>
</svg>

After

Width:  |  Height:  |  Size: 1.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 20 KiB

View File

Before

Width:  |  Height:  |  Size: 3.1 MiB

After

Width:  |  Height:  |  Size: 3.1 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.6 KiB

View File

@@ -26,14 +26,16 @@
:form-values="formValues" :form-values="formValues"
:api-prefix="apiPrefix" :api-prefix="apiPrefix"
:is-submitting="isLoading" :is-submitting="isLoading"
:hide-actions="true" :show-send-button="true"
:is-submitting-form="isLoading"
:send-button-text="'Verstuur formulier'"
@update:form-values="updateFormValues" @update:form-values="updateFormValues"
@form-enter-pressed="sendMessage"
@form-send-submit="handleFormSendSubmit"
></dynamic-form> ></dynamic-form>
<!-- Geen extra knoppen meer onder het formulier, alles gaat via de hoofdverzendknop -->
</div> </div>
<div class="chat-input"> <div v-if="!formData" class="chat-input">
<!-- Main input area --> <!-- Main input area -->
<div class="input-main"> <div class="input-main">
<textarea <textarea
@@ -57,13 +59,12 @@
<!-- Input actions --> <!-- Input actions -->
<div class="input-actions"> <div class="input-actions">
<!-- Universele verzendknop (voor zowel berichten als formulieren) --> <!-- Message send button -->
<button <button
@click="sendMessage" @click="sendMessage"
class="send-btn" class="send-btn"
:class="{ 'form-mode': formData }"
:disabled="!canSend" :disabled="!canSend"
:title="formData ? 'Verstuur formulier' : 'Verstuur bericht'" :title="'Verstuur bericht'"
> >
<span v-if="isLoading" class="loading-spinner"></span> <span v-if="isLoading" class="loading-spinner"></span>
<svg v-else width="20" height="20" viewBox="0 0 24 24" fill="currentColor"> <svg v-else width="20" height="20" viewBox="0 0 24 24" fill="currentColor">
@@ -156,12 +157,15 @@ export default {
}, },
canSend() { canSend() {
const hasValidForm = this.formData && this.validateForm(); if (this.isLoading) return false;
const hasValidMessage = this.localMessage.trim() && !this.isOverLimit;
// We kunnen nu verzenden als er een geldig formulier OF een geldig bericht is if (this.formData) {
// Bij een formulier is aanvullende tekst optioneel // Form mode: only validate form, message is optional
return (!this.isLoading) && (hasValidForm || hasValidMessage); return this.validateForm();
} else {
// Normal mode: validate message
return this.localMessage.trim() && !this.isOverLimit;
}
}, },
hasFormDataToSend() { hasFormDataToSend() {
@@ -319,21 +323,32 @@ export default {
}, },
sendMessage() { sendMessage() {
console.log('ChatInput: sendMessage called, formData:', !!this.formData);
if (!this.canSend) return; if (!this.canSend) return;
// Bij een formulier gaan we het formulier en optioneel bericht combineren // Bij een formulier gaan we het formulier en optioneel bericht combineren
if (this.formData) { if (this.formData) {
console.log('ChatInput: Processing form submission');
// Valideer het formulier // Valideer het formulier
if (this.validateForm()) { if (this.validateForm()) {
// Verstuur het formulier, eventueel met aanvullende tekst // Verstuur het formulier, eventueel met aanvullende tekst
this.$emit('submit-form', this.formValues); this.$emit('submit-form', this.formValues);
} }
} else if (this.localMessage.trim()) { } else if (this.localMessage.trim()) {
console.log('ChatInput: Processing regular message');
// Verstuur normaal bericht zonder formulier // Verstuur normaal bericht zonder formulier
this.$emit('send-message'); this.$emit('send-message');
} }
}, },
handleFormSendSubmit(formValues) {
console.log('ChatInput: handleFormSendSubmit called with values:', formValues);
// Zorg dat formValues correct worden doorgegeven
this.formValues = formValues;
// Roep sendMessage aan om de normale flow te volgen
this.sendMessage();
},
getFormValuesForSending() { getFormValuesForSending() {
// Geeft de huidige formulierwaarden terug voor verzending // Geeft de huidige formulierwaarden terug voor verzending
return this.formValues; return this.formValues;
@@ -420,9 +435,9 @@ export default {
/* Algemene container */ /* Algemene container */
.chat-input-container { .chat-input-container {
width: 100%; width: 100%;
padding: 10px; padding: 20px;
background-color: var(--active-background-color); background-color: var(--active-background-color);
color: var(--active-text-color); color: var(--human-message-text-color);
border-top: 1px solid #e0e0e0; border-top: 1px solid #e0e0e0;
font-family: Arial, sans-serif; font-family: Arial, sans-serif;
font-size: 14px; font-size: 14px;
@@ -454,8 +469,8 @@ export default {
font-family: Arial, sans-serif; font-family: Arial, sans-serif;
font-size: 14px; font-size: 14px;
/* Transparante achtergrond in plaats van wit */ /* Transparante achtergrond in plaats van wit */
background-color: var(--active-background-color); background-color: var(--human-message-background);
color: var(--active-text-color); color: var(--human-message-text-color);
/* Box-sizing om padding correct te berekenen */ /* Box-sizing om padding correct te berekenen */
box-sizing: border-box; box-sizing: border-box;
} }
@@ -466,7 +481,8 @@ export default {
right: 15px; right: 15px;
bottom: 12px; bottom: 12px;
font-size: 12px; font-size: 12px;
color: #999; color: var(--human-message-text-color);
opacity: 0.7;
pointer-events: none; /* Voorkom dat deze de textarea verstoort */ pointer-events: none; /* Voorkom dat deze de textarea verstoort */
} }
@@ -491,38 +507,24 @@ export default {
justify-content: center; justify-content: center;
width: 40px; width: 40px;
height: 40px; height: 40px;
background-color: var(--active-background-color); background-color: var(--human-message-background);
color: var(--active-text-color); color: var(--human-message-text-color);
border: 1px solid var(--active-text-color); border: 2px solid var(--human-message-text-color);
border-radius: 50%; border-radius: 50%;
cursor: pointer; cursor: pointer;
transition: background-color 0.2s; transition: background-color 0.2s;
flex-shrink: 0; /* Voorkom dat de knop krimpt */ flex-shrink: 0; /* Voorkom dat de knop krimpt */
} }
.send-btn:hover { .send-btn:hover:not(:disabled) {
background-color: var(--active-text-color); background-color: var(--human-message-background);
color: var(--active-background-color);
} }
.send-btn:disabled { .send-btn:disabled {
background-color: #ccc; background-color: #ccc;
color: #666;
border-color: #ccc;
cursor: not-allowed; cursor: not-allowed;
} }
.send-btn.form-mode {
background-color: var(--active-background-color);
color: var(--active-text-color);
border-color: var(--active-text-color);
}
.send-btn.form-mode:hover {
background-color: var(--active-text-color);
color: var(--active-background-color);
}
/* Loading spinner */ /* Loading spinner */
.loading-spinner { .loading-spinner {
display: inline-block; display: inline-block;
@@ -538,8 +540,8 @@ export default {
.active-ai-message-area { .active-ai-message-area {
margin-bottom: 15px; margin-bottom: 15px;
padding: 12px; padding: 12px;
background-color: var(--active-background-color); background-color: var(--ai-message-background);
color: var(--active-text-color); color: var(--ai-message-text-color);
border-radius: 8px; border-radius: 8px;
font-family: Arial, sans-serif; font-family: Arial, sans-serif;
font-size: 14px; font-size: 14px;

View File

@@ -3,6 +3,13 @@
<!-- Normal text messages --> <!-- Normal text messages -->
<template v-if="message.type === 'text'"> <template v-if="message.type === 'text'">
<div class="message-content" style="width: 100%;"> <div class="message-content" style="width: 100%;">
<!-- EveAI Logo voor AI berichten - links boven, half buiten de bubbel -->
<img
v-if="message.sender === 'ai'"
src="/static/assets/img/eveai_logo.svg"
alt="EveAI"
class="ai-message-logo"
/>
<!-- Voortgangstracker voor AI berichten met task_id - ALLEEN VOOR LAATSTE AI MESSAGE --> <!-- Voortgangstracker voor AI berichten met task_id - ALLEEN VOOR LAATSTE AI MESSAGE -->
<progress-tracker <progress-tracker
v-if="message.sender === 'ai' && message.taskId && isLatestAiMessage" v-if="message.sender === 'ai' && message.taskId && isLatestAiMessage"
@@ -95,6 +102,13 @@
<!-- Error messages --> <!-- Error messages -->
<template v-else-if="message.type === 'error'"> <template v-else-if="message.type === 'error'">
<div class="message-content error-content"> <div class="message-content error-content">
<!-- EveAI Logo voor AI berichten - links boven, half buiten de bubbel -->
<img
v-if="message.sender === 'ai'"
src="/static/assets/img/eveai_logo.svg"
alt="EveAI"
class="ai-message-logo"
/>
<div class="form-error"> <div class="form-error">
{{ message.content }} {{ message.content }}
</div> </div>
@@ -107,6 +121,13 @@
<!-- Other message types --> <!-- Other message types -->
<template v-else> <template v-else>
<div class="message-content"> <div class="message-content">
<!-- EveAI Logo voor AI berichten - links boven, half buiten de bubbel -->
<img
v-if="message.sender === 'ai'"
src="/static/assets/img/eveai_logo.svg"
alt="EveAI"
class="ai-message-logo"
/>
<div class="message-text" v-html="formatMessage(message.content)"></div> <div class="message-text" v-html="formatMessage(message.content)"></div>
</div> </div>
</template> </template>
@@ -304,6 +325,11 @@ export default {
getMessageClass() { getMessageClass() {
let classes = `message ${this.message.sender}`; let classes = `message ${this.message.sender}`;
// Add 'has-form' class for user messages with formulieren
if (this.message.sender === 'user' && this.hasMeaningfulFormValues(this.message)) {
classes += ' has-form';
}
// Add class for temporarily positioned AI messages // Add class for temporarily positioned AI messages
if (this.message.isTemporarilyAtBottom) { if (this.message.isTemporarilyAtBottom) {
classes += ' temporarily-at-bottom'; classes += ' temporarily-at-bottom';
@@ -343,10 +369,16 @@ export default {
margin-right: auto; margin-right: auto;
} }
/* User messages with forms get fixed width of 90% */
.message.user.has-form {
width: 90%;
max-width: none;
}
/* Styling for temporarily positioned AI messages */ /* Styling for temporarily positioned AI messages */
.message.ai.temporarily-at-bottom { .message.ai.temporarily-at-bottom {
background-color: var(--active-background-color); background-color: var(--ai-message-background);
color: var(--active-text-color); color: var(--ai-message-text-color);
opacity: 0.9; opacity: 0.9;
border-radius: 8px; border-radius: 8px;
padding: 8px; padding: 8px;
@@ -355,24 +387,24 @@ export default {
/* Styling for messages in sticky area - override history colors with active colors */ /* Styling for messages in sticky area - override history colors with active colors */
.message.sticky-area .message-content { .message.sticky-area .message-content {
background: var(--active-background-color); background: var(--ai-message-background);
color: var(--active-text-color); color: var(--ai-message-text-color);
} }
/* Override message bubble colors for sticky area */ /* Override message bubble colors for sticky area */
.message.sticky-area.user .message-content, .message.sticky-area.user .message-content,
.message.sticky-area.ai .message-content { .message.sticky-area.ai .message-content {
background: var(--active-background-color) !important; background: var(--ai-message-background) !important;
color: var(--active-text-color) !important; color: var(--ai-message-text-color) !important;
border: 1px solid var(--active-text-color); border: 1px solid var(--ai-message-text-color);
border-radius: 8px; border-radius: 8px;
padding: 12px; padding: 12px;
} }
/* Active styling for messages in input area */ /* Active styling for messages in input area */
.message.input-area .message-content { .message.input-area .message-content {
background-color: var(--active-background-color); background-color: var(--ai-message-background);
color: var(--active-text-color); color: var(--ai-message-text-color);
border-radius: 8px; border-radius: 8px;
padding: 12px; padding: 12px;
} }
@@ -382,10 +414,30 @@ export default {
font-size: 14px; font-size: 14px;
} }
/* EveAI Logo styling voor AI berichten */
.ai-message-logo {
position: absolute;
top: -20px;
left: -20px;
width: 36px;
height: 36px;
border-radius: 50%;
background-color: var(--ai-message-background);
padding: 2px;
box-shadow: 0 2px 6px rgba(0, 0, 0, 0.15);
z-index: 10;
pointer-events: none;
}
/* Ensure message-content has relative positioning for logo positioning */
.message.ai .message-content {
position: relative;
}
/* Formulier styling */ /* Formulier styling */
.form-display { .form-display {
margin: 15px 0; margin: 15px 0;
color: var(--active-text-color); color: var(--human-message-text-color);
padding: 15px; padding: 15px;
font-family: inherit; font-family: inherit;
} }
@@ -425,11 +477,11 @@ export default {
width: 100%; width: 100%;
padding: 6px; padding: 6px;
border-radius: 4px; border-radius: 4px;
border: 1px solid var(--active-text-color); border: 1px solid var(--human-message-text-color);
font-family: Arial, sans-serif; font-family: Arial, sans-serif;
font-size: 14px; font-size: 14px;
background-color: var(--active-background-color); background-color: var(--human-message-background);
color: var(--active-text-color); color: var(--human-message-text-color);
} }
.form-result-table textarea.form-textarea { .form-result-table textarea.form-textarea {
@@ -548,6 +600,11 @@ export default {
max-width: 95%; max-width: 95%;
} }
/* User messages with forms get fixed width of 95% on mobile */
.message.user.has-form {
width: 95%;
}
.form-result-table td:first-child { .form-result-table td:first-child {
width: 40%; width: 40%;
} }

View File

@@ -21,6 +21,7 @@
@update:model-value="updateFieldValue(field.id || field.name, $event)" @update:model-value="updateFieldValue(field.id || field.name, $event)"
@open-privacy-modal="openPrivacyModal" @open-privacy-modal="openPrivacyModal"
@open-terms-modal="openTermsModal" @open-terms-modal="openTermsModal"
@keydown-enter="handleEnterKey"
/> />
</template> </template>
<template v-else-if="typeof formData.fields === 'object'"> <template v-else-if="typeof formData.fields === 'object'">
@@ -33,29 +34,49 @@
@update:model-value="updateFieldValue(fieldId, $event)" @update:model-value="updateFieldValue(fieldId, $event)"
@open-privacy-modal="openPrivacyModal" @open-privacy-modal="openPrivacyModal"
@open-terms-modal="openTermsModal" @open-terms-modal="openTermsModal"
@keydown-enter="handleEnterKey"
/> />
</template> </template>
</div> </div>
<!-- Form actions (only show if not hidden and not read-only) --> <!-- Form actions (only show if not hidden and not read-only) -->
<div v-if="!hideActions && !readOnly" class="form-actions"> <div v-if="!hideActions && !readOnly" class="form-actions" :class="{ 'with-send-button': showSendButton }">
<button <!-- Send button mode (ChatInput styling) -->
type="button" <template v-if="showSendButton">
@click="handleCancel" <button
class="btn btn-secondary" type="button"
:disabled="isSubmitting" @click="handleSendSubmit"
> class="send-btn"
Annuleren :disabled="isSubmittingForm || !isFormValid"
</button> :title="sendButtonText"
<button >
type="button" <span v-if="isSubmittingForm" class="loading-spinner"></span>
@click="handleSubmit" <svg v-else width="20" height="20" viewBox="0 0 24 24" fill="currentColor">
class="btn btn-primary" <path d="M2.01 21L23 12 2.01 3 2 10l15 2-15 2z"/>
:disabled="isSubmitting || !isFormValid" </svg>
> </button>
<span v-if="isSubmitting">Verzenden...</span> </template>
<span v-else>Versturen</span>
</button> <!-- Standard buttons mode -->
<template v-else>
<button
type="button"
@click="handleCancel"
class="btn btn-secondary"
:disabled="isSubmitting"
>
Annuleren
</button>
<button
type="button"
@click="handleSubmit"
class="btn btn-primary"
:disabled="isSubmitting || !isFormValid"
>
<span v-if="isSubmitting">Verzenden...</span>
<span v-else>Versturen</span>
</button>
</template>
</div> </div>
<!-- Read-only form display --> <!-- Read-only form display -->
@@ -66,8 +87,9 @@
class="form-field-readonly" class="form-field-readonly"
> >
<div class="field-label">{{ field.name }}:</div> <div class="field-label">{{ field.name }}:</div>
<div class="field-value" :class="{'text-value': field.type === 'text'}"> <div class="field-value" :class="{'text-value': field.type === 'text', 'boolean-value': field.type === 'boolean'}">
{{ formatFieldValue(fieldId, field) }} <span v-if="field.type === 'boolean'" v-html="formatFieldValue(fieldId, field)"></span>
<span v-else>{{ formatFieldValue(fieldId, field) }}</span>
</div> </div>
</div> </div>
</div> </div>
@@ -87,12 +109,15 @@ export default {
'form-field': FormField 'form-field': FormField
}, },
setup(props) { setup(props) {
const { watchIcon } = useIconManager(); const { watchIcon, loadIcons } = useIconManager();
const contentModal = injectContentModal(); const contentModal = injectContentModal();
// Watch formData.icon for automatic icon loading // Watch formData.icon for automatic icon loading
watchIcon(() => props.formData?.icon); watchIcon(() => props.formData?.icon);
// Preload boolean icons
loadIcons(['check_circle', 'cancel']);
return { return {
contentModal contentModal
}; };
@@ -149,9 +174,21 @@ export default {
apiPrefix: { apiPrefix: {
type: String, type: String,
required: true required: true
},
showSendButton: {
type: Boolean,
default: false
},
sendButtonText: {
type: String,
default: 'Verstuur formulier'
},
isSubmittingForm: {
type: Boolean,
default: false
} }
}, },
emits: ['submit', 'cancel', 'update:formValues'], emits: ['submit', 'cancel', 'update:formValues', 'form-enter-pressed', 'form-send-submit'],
data() { data() {
return { return {
localFormValues: { ...this.formValues } localFormValues: { ...this.formValues }
@@ -259,6 +296,11 @@ export default {
mounted() { mounted() {
// Proactief alle boolean velden initialiseren bij het laden // Proactief alle boolean velden initialiseren bij het laden
this.initializeBooleanFields(); this.initializeBooleanFields();
// Auto-focus on first form field for better UX
this.$nextTick(() => {
this.focusFirstField();
});
}, },
methods: { methods: {
// Proactieve initialisatie van alle boolean velden // Proactieve initialisatie van alle boolean velden
@@ -388,6 +430,16 @@ export default {
this.$emit('cancel'); this.$emit('cancel');
}, },
handleSendSubmit() {
// Eerst proactief alle boolean velden corrigeren
this.initializeBooleanFields();
// Wacht tot updates zijn verwerkt, dan emit de form values
this.$nextTick(() => {
this.$emit('form-send-submit', this.localFormValues);
});
},
getFieldsForDisplay() { getFieldsForDisplay() {
// Voor read-only weergave // Voor read-only weergave
if (Array.isArray(this.formData.fields)) { if (Array.isArray(this.formData.fields)) {
@@ -410,7 +462,15 @@ export default {
// Format different field types // Format different field types
if (field.type === 'boolean') { if (field.type === 'boolean') {
return value ? true : false; const iconName = value ? 'check_circle' : 'cancel';
const label = value ? 'Ja' : 'Nee';
const cssClass = value ? 'boolean-true' : 'boolean-false';
return `<span class="material-symbols-outlined boolean-icon ${cssClass}"
aria-label="${label}"
title="${label}">
${iconName}
</span>`;
} else if (field.type === 'enum' && !value && field.default) { } else if (field.type === 'enum' && !value && field.default) {
return field.default; return field.default;
} }
@@ -450,6 +510,26 @@ export default {
title: title, title: title,
contentUrl: contentUrl contentUrl: contentUrl
}); });
},
// Handle Enter key press in form fields
handleEnterKey(event) {
console.log('DynamicForm: Enter event received, emitting form-enter-pressed');
// Prevent default form submission
event.preventDefault();
// Emit event to parent (ChatInput) to trigger send
this.$emit('form-enter-pressed');
},
// Focus management - auto-focus on first form field
focusFirstField() {
if (this.readOnly) return; // Don't focus in read-only mode
// Find the first focusable input element
const firstInput = this.$el.querySelector('input:not([type="hidden"]):not([type="radio"]):not([type="checkbox"]), textarea, select');
if (firstInput) {
firstInput.focus();
}
} }
} }
}; };
@@ -463,6 +543,8 @@ export default {
} }
.dynamic-form { .dynamic-form {
background: var(--human-message-background);
border-radius: 8px;
padding: 15px; padding: 15px;
box-shadow: 0 2px 15px rgba(0,0,0,0.1); box-shadow: 0 2px 15px rgba(0,0,0,0.1);
} }
@@ -472,11 +554,11 @@ export default {
align-items: center; align-items: center;
margin-bottom: 15px; margin-bottom: 15px;
padding-bottom: 10px; padding-bottom: 10px;
border-bottom: 1px solid var(--active-text-color); border-bottom: 1px solid var(--human-message-text-color);
} }
.dynamic-form.readonly .form-header { .dynamic-form.readonly .form-header {
border-bottom: 1px solid var(--history-message-text-color); border-bottom: 1px solid #777;
} }
.form-icon { .form-icon {
@@ -486,21 +568,21 @@ export default {
display: flex; display: flex;
align-items: center; align-items: center;
justify-content: center; justify-content: center;
color: var(--active-text-color); color: var(--human-message-text-color);
} }
.dynamic-form.readonly .form-icon { .dynamic-form.readonly .form-icon {
color: var(--history-message-text-color); color: #777;
} }
.form-title { .form-title {
font-size: 1.2rem; font-size: 1.2rem;
font-weight: 600; font-weight: 600;
color: var(--active-text-color); color: var(--human-message-text-color);
} }
.dynamic-form.readonly .form-title { .dynamic-form.readonly .form-title {
color: var(--history-message-text-color); color: #777;
} }
.form-fields { .form-fields {
@@ -650,7 +732,7 @@ export default {
} }
.dynamic-form.readonly .form-field-readonly { .dynamic-form.readonly .form-field-readonly {
border-bottom: 1px solid var(--history-message-text-color); border-bottom: 1px solid #777;
} }
.field-label { .field-label {
@@ -659,20 +741,73 @@ export default {
padding-right: 10px; padding-right: 10px;
} }
.dynamic-form.readonly .field-label {
color: var(--history-message-text-color);
}
.field-value { .field-value {
flex: 1; flex: 1;
word-break: break-word; word-break: break-word;
} }
.dynamic-form.readonly .field-value {
color: var(--history-message-text-color);
}
.text-value { .text-value {
white-space: pre-wrap; white-space: pre-wrap;
} }
/* Boolean icon styling */
.boolean-icon {
font-size: 20px;
vertical-align: middle;
}
.boolean-true {
color: #4caf50; /* Groen voor true */
}
.boolean-false {
color: #f44336; /* Rood voor false */
}
.field-value.boolean-value {
display: flex;
align-items: center;
}
/* Send button styling (ChatInput consistency) */
.send-btn {
display: flex;
align-items: center;
justify-content: center;
width: 40px;
height: 40px;
background-color: var(--human-message-background);
color: var(--human-message-text-color);
border: 2px solid var(--human-message-text-color);
border-radius: 50%;
cursor: pointer;
transition: background-color 0.2s;
}
.send-btn:hover:not(:disabled) {
background-color: var(--human-message-background);
}
.send-btn:disabled {
background-color: #ccc;
cursor: not-allowed;
}
/* Loading spinner for send button */
.loading-spinner {
display: inline-block;
animation: spin 1s linear infinite;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
/* Flexbox layout for send button mode */
.form-actions.with-send-button {
display: flex;
justify-content: flex-end;
align-items: center;
}
</style> </style>

View File

@@ -24,6 +24,7 @@
:required="field.required" :required="field.required"
:placeholder="field.placeholder || ''" :placeholder="field.placeholder || ''"
:title="description" :title="description"
@keydown.enter="handleEnterKey"
style="width: 100%; padding: 8px; border-radius: 4px; border: 1px solid #ddd; box-sizing: border-box;" style="width: 100%; padding: 8px; border-radius: 4px; border: 1px solid #ddd; box-sizing: border-box;"
> >
@@ -37,6 +38,7 @@
:step="stepValue" :step="stepValue"
:placeholder="field.placeholder || ''" :placeholder="field.placeholder || ''"
:title="description" :title="description"
@keydown.enter="handleEnterKey"
style="width: 100%; padding: 8px; border-radius: 4px; border: 1px solid #ddd; box-sizing: border-box;" style="width: 100%; padding: 8px; border-radius: 4px; border: 1px solid #ddd; box-sizing: border-box;"
> >
@@ -49,6 +51,7 @@
:rows="field.rows || 3" :rows="field.rows || 3"
:placeholder="field.placeholder || ''" :placeholder="field.placeholder || ''"
:title="description" :title="description"
@keydown="handleTextareaKeydown"
style="width: 100%; padding: 8px; border-radius: 4px; border: 1px solid #ddd; box-sizing: border-box; resize: vertical;" style="width: 100%; padding: 8px; border-radius: 4px; border: 1px solid #ddd; box-sizing: border-box; resize: vertical;"
></textarea> ></textarea>
@@ -196,7 +199,7 @@ export default {
default: null default: null
} }
}, },
emits: ['update:modelValue', 'open-privacy-modal', 'open-terms-modal'], emits: ['update:modelValue', 'open-privacy-modal', 'open-terms-modal', 'keydown-enter'],
setup() { setup() {
// Consent text constants (English base) // Consent text constants (English base)
const consentTexts = { const consentTexts = {
@@ -321,6 +324,25 @@ export default {
openTermsModal(event) { openTermsModal(event) {
event.preventDefault(); event.preventDefault();
this.$emit('open-terms-modal'); this.$emit('open-terms-modal');
},
// Handle Enter key press for text and number inputs
handleEnterKey(event) {
console.log('FormField: Enter pressed in field:', this.fieldId);
event.preventDefault();
this.$emit('keydown-enter');
},
// Handle keydown for textarea (Enter to submit, Shift+Enter for line breaks)
handleTextareaKeydown(event) {
console.log('FormField: Textarea keydown in field:', this.fieldId, 'Key:', event.key, 'Ctrl:', event.ctrlKey, 'Shift:', event.shiftKey);
if (event.key === 'Enter' && !event.shiftKey) {
// Plain Enter submits the form
console.log('FormField: Textarea Enter triggered for field:', this.fieldId);
event.preventDefault();
this.$emit('keydown-enter');
}
// Shift+Enter allows line breaks in textarea
} }
} }
}; };
@@ -462,7 +484,6 @@ export default {
.field-context { .field-context {
margin-bottom: 8px; margin-bottom: 8px;
font-size: 0.9rem; font-size: 0.9rem;
color: #666;
padding: 8px; padding: 8px;
border-radius: 4px; border-radius: 4px;
text-align: left; text-align: left;

View File

@@ -7,32 +7,35 @@
<slot name="loading"></slot> <slot name="loading"></slot>
</div> </div>
<!-- Empty state --> <!-- Messages wrapper for bottom alignment -->
<div v-if="normalMessages.length === 0" class="empty-state"> <div class="messages-wrapper">
<div class="empty-icon">💬</div> <!-- Empty state (only show when no messages) -->
<div class="empty-text">Nog geen berichten</div> <div v-if="normalMessages.length === 0" class="empty-state">
<div class="empty-subtext">Start een gesprek door een bericht te typen!</div> <div class="empty-icon">💬</div>
</div> <div class="empty-text">Nog geen berichten</div>
<div class="empty-subtext">Start een gesprek door een bericht te typen!</div>
</div>
<!-- Normal message list (excluding temporarily positioned AI messages) --> <!-- Normal message list (excluding temporarily positioned AI messages) -->
<template v-if="normalMessages.length > 0"> <template v-if="normalMessages.length > 0">
<!-- Messages --> <!-- Messages -->
<template v-for="(message, index) in normalMessages" :key="message.id"> <template v-for="(message, index) in normalMessages" :key="message.id">
<!-- The actual message --> <!-- The actual message -->
<chat-message <chat-message
:message="message" :message="message"
:is-submitting-form="isSubmittingForm" :is-submitting-form="isSubmittingForm"
:api-prefix="apiPrefix" :api-prefix="apiPrefix"
:is-latest-ai-message="isLatestAiMessage(message)" :is-latest-ai-message="isLatestAiMessage(message)"
@image-loaded="handleImageLoaded" @image-loaded="handleImageLoaded"
@specialist-complete="$emit('specialist-complete', $event)" @specialist-complete="$emit('specialist-complete', $event)"
@specialist-error="$emit('specialist-error', $event)" @specialist-error="$emit('specialist-error', $event)"
></chat-message> ></chat-message>
</template>
</template> </template>
</template>
<!-- Typing indicator --> <!-- Typing indicator -->
<typing-indicator v-if="isTyping"></typing-indicator> <typing-indicator v-if="isTyping"></typing-indicator>
</div>
</div> </div>
</div> </div>
@@ -96,14 +99,20 @@ export default {
watch: { watch: {
messages: { messages: {
handler(newMessages, oldMessages) { handler(newMessages, oldMessages) {
// Auto-scroll when new messages are added const hasNewMessages = newMessages.length > (oldMessages?.length || 0);
if (this.autoScroll && newMessages.length > (oldMessages?.length || 0)) {
// Always auto-scroll when new messages are added (regardless of current scroll position)
if (this.autoScroll && hasNewMessages) {
// Double $nextTick for better DOM update synchronization
this.$nextTick(() => { this.$nextTick(() => {
this.scrollToBottom(); this.$nextTick(() => {
this.scrollToBottom(true);
});
}); });
} }
}, },
deep: true deep: true,
immediate: false
}, },
isTyping(newVal) { isTyping(newVal) {
if (newVal && this.autoScroll) { if (newVal && this.autoScroll) {
@@ -188,13 +197,16 @@ export default {
} }
}, },
scrollToBottom() { scrollToBottom(force = false) {
const container = this.$refs.messagesContainer; const container = this.$refs.messagesContainer;
if (container) { if (container) {
container.scrollTop = container.scrollHeight; // Use requestAnimationFrame for better timing
this.isAtBottom = true; requestAnimationFrame(() => {
this.showScrollButton = false; container.scrollTop = container.scrollHeight;
this.unreadCount = 0; this.isAtBottom = true;
this.showScrollButton = false;
this.unreadCount = 0;
});
} }
}, },
@@ -209,7 +221,7 @@ export default {
const container = this.$refs.messagesContainer; const container = this.$refs.messagesContainer;
if (!container) return; if (!container) return;
const threshold = 100; // pixels from bottom const threshold = 50; // Reduced threshold for better detection
const isNearBottom = container.scrollHeight - container.scrollTop - container.clientHeight < threshold; const isNearBottom = container.scrollHeight - container.scrollTop - container.clientHeight < threshold;
this.isAtBottom = isNearBottom; this.isAtBottom = isNearBottom;
@@ -221,7 +233,7 @@ export default {
}, },
handleImageLoaded() { handleImageLoaded() {
// Auto-scroll when images load to maintain position // Auto-scroll when img load to maintain position
if (this.isAtBottom) { if (this.isAtBottom) {
this.$nextTick(() => this.scrollToBottom()); this.$nextTick(() => this.scrollToBottom());
} }
@@ -273,8 +285,19 @@ export default {
overflow-y: auto; overflow-y: auto;
padding: 10px; padding: 10px;
scroll-behavior: smooth; scroll-behavior: smooth;
/* Bottom-aligned messages implementation */
display: flex;
flex-direction: column;
justify-content: flex-end;
min-height: 100%;
} }
.messages-wrapper {
display: flex;
flex-direction: column;
gap: 10px; /* Space between messages */
}
.load-more-indicator { .load-more-indicator {
text-align: center; text-align: center;

View File

@@ -10,7 +10,8 @@
<!-- Custom theme colors from tenant settings --> <!-- Custom theme colors from tenant settings -->
<style> <style>
:root { :root {
/* Legacy support - keeping for backward compatibility */ /* Legacy support - keeping for backward compatibility only */
/* These variables are deprecated and should not be used in new code */
--primary-color: {{ customisation.active_background_color|default('#ffffff') }}; --primary-color: {{ customisation.active_background_color|default('#ffffff') }};
--secondary-color: {{ customisation.active_text_color|default('#212529') }}; --secondary-color: {{ customisation.active_text_color|default('#212529') }};
@@ -28,13 +29,18 @@
/* Active elements customisation */ /* Active elements customisation */
--active-background-color: {{ customisation.active_background_color|default('#ffffff') }}; --active-background-color: {{ customisation.active_background_color|default('#ffffff') }};
--active-text-color: {{ customisation.active_text_color|default('#212529') }};
/* History customisation with alpha-based color manipulation */ /* History customisation with alpha-based color manipulation */
--history-background: {{ customisation.history_background|default(10)|adjust_color_alpha }}; --history-background: {{ customisation.history_background|default(10)|adjust_color_alpha }};
--history-user-message-background: {{ customisation.history_user_message_background|default(-10)|adjust_color_alpha }};
--history-ai-message-background: {{ customisation.history_ai_message_background|default(0)|adjust_color_alpha }}; /* AI Message Colors */
--history-message-text-color: {{ customisation.history_message_text_color|default('#212529') }}; --ai-message-background: {{ customisation.ai_message_background|default('#f5f7fa') }};
--ai-message-text-color: {{ customisation.ai_message_text_color|default('#212529') }};
/* Human Message Colors */
--human-message-background: {{ customisation.human_message_background|default('#ffffff') }};
--human-message-text-color: {{ customisation.human_message_text_color|default('#212529') }};
} }
</style> </style>

View File

@@ -18,3 +18,12 @@ LANGUAGE_LEVEL = [
"ideal_audience": "Management, HR, technical profiles" "ideal_audience": "Management, HR, technical profiles"
} }
] ]
def get_language_level_context(language_level: str) -> str:
    """Build a short textual description of a configured language level.

    Args:
        language_level: Name of the level, matched against the ``name`` key
            of the entries in the module-level ``LANGUAGE_LEVEL`` list.

    Returns:
        str: "<description>, corresponding to CEFR level <cefr_level>".

    Raises:
        ValueError: If ``language_level`` is not a known level name.
            (Previously an unknown name fell through to ``None`` and raised
            an opaque ``TypeError`` on subscripting.)
    """
    selected_language_level = next(
        (item for item in LANGUAGE_LEVEL if item["name"] == language_level),
        None
    )
    if selected_language_level is None:
        raise ValueError(f"Unknown language level: {language_level!r}")
    language_level_context = (f"{selected_language_level['description']}, "
                              f"corresponding to CEFR level {selected_language_level['cefr_level']}")
    return language_level_context

View File

@@ -30,3 +30,12 @@ TONE_OF_VOICE = [
"when_to_use": "Technical, logistics, blue-collar jobs, production environments" "when_to_use": "Technical, logistics, blue-collar jobs, production environments"
} }
] ]
def get_tone_of_voice_context(tone_of_voice: str) -> str:
    """Return the description text for a configured tone of voice.

    Args:
        tone_of_voice: Name of the tone, matched against the ``name`` key
            of the entries in the module-level ``TONE_OF_VOICE`` list.

    Returns:
        str: The tone's ``description`` text.

    Raises:
        ValueError: If ``tone_of_voice`` is not a known tone name.

    Note:
        The original implementation used
        ``f"{selected_tone_of_voice["description"]}"`` — double quotes nested
        inside a double-quoted f-string — which is a SyntaxError on every
        Python version before 3.12. It also subscripted the ``None`` fallback
        when the tone was not found.
    """
    selected_tone_of_voice = next(
        (item for item in TONE_OF_VOICE if item["name"] == tone_of_voice),
        None  # fallback if not found
    )
    if selected_tone_of_voice is None:
        raise ValueError(f"Unknown tone of voice: {tone_of_voice!r}")
    return selected_tone_of_voice["description"]

View File

@@ -7,3 +7,7 @@ class RAGOutput(BaseModel):
answer: str = Field(None, description="Answer to the questions asked, in Markdown format.") answer: str = Field(None, description="Answer to the questions asked, in Markdown format.")
insufficient_info: bool = Field(None, description="An indication if there's insufficient information to answer") insufficient_info: bool = Field(None, description="An indication if there's insufficient information to answer")
model_config = {
"extra": "allow"
}

View File

@@ -0,0 +1,12 @@
from typing import List, Optional
from pydantic import BaseModel, Field
class TraicieAdvancedRAGOutput(BaseModel):
    """Structured output of the advanced RAG specialist task.

    Depending on ``mode`` either the RAG fields (``answer``,
    ``insufficient_info``) or the CHECK field (``affirmative_answer``) is
    populated; the fields of the other mode remain ``None``, so they are
    declared ``Optional`` (the original annotated them as plain ``str``/
    ``bool`` while defaulting to ``None``).
    """
    # The execution mode is always required.
    mode: str = Field(..., description="The mode of execution (RAG or CHECK).")
    # RAG-mode fields — absent (None) when running in CHECK mode.
    answer: Optional[str] = Field(None, description="Answer to the questions asked, when in RAG")
    insufficient_info: Optional[bool] = Field(None, description="An indication if there's insufficient information to answer, when in RAG")
    # CHECK-mode field — absent (None) when running in RAG mode.
    affirmative_answer: Optional[bool] = Field(None, description="An indication if the answer is affirmative (true) or negative (false), when in CHECK")

View File

@@ -0,0 +1,10 @@
from typing import List, Optional
from pydantic import BaseModel, Field
class TraicieAffirmativeAnswerOutput(BaseModel):
    """Structured LLM output: classification of a free-text human reply."""
    # True when the provided answer is affirmative, False when negative.
    affirmative: bool = Field(..., description="Is the provided answer affirmative (true) or negative (false).")

View File

@@ -0,0 +1,10 @@
from typing import List, Optional
from pydantic import BaseModel, Field
class TraicieInterviewModeOutput(BaseModel):
    """Structured LLM output: which execution mode the specialist should use."""
    # Expected to be one of the two literal strings "RAG" or "CHECK".
    mode: str = Field(..., description="Your answer: RAG or CHECK.")

View File

@@ -0,0 +1,144 @@
# retrievers/standard_rag.py
import json
from datetime import datetime as dt, timezone as tz
from typing import Dict, Any, List
from sqlalchemy import func, or_, desc
from sqlalchemy.exc import SQLAlchemyError
from flask import current_app
from common.extensions import db
from common.models.document import Document, DocumentVersion, Catalog, Retriever
from common.models.user import Tenant
from common.utils.datetime_utils import get_date_in_timezone
from common.utils.model_utils import get_embedding_model_and_class
from eveai_chat_workers.retrievers.base_retriever import BaseRetriever
from eveai_chat_workers.retrievers.retriever_typing import RetrieverArguments, RetrieverResult, RetrieverMetadata
class RetrieverExecutor(BaseRetriever):
    """Standard RAG retriever implementation.

    Scopes the search to the latest version of each document whose
    ``catalog_properties['role_reference']`` matches the requested role,
    then ranks chunks by cosine similarity to the question embedding.
    """

    def __init__(self, tenant_id: int, retriever_id: int):
        # All state (embedding model, retriever config, catalog id, tuning
        # flag) is set up by BaseRetriever.
        super().__init__(tenant_id, retriever_id)

    @property
    def type(self) -> str:
        # Retriever type identifier used to register/select this executor.
        return "TRAICIE_ROLE_DEFINITION_BY_ROLE_IDENTIFICATION"

    @property
    def type_version(self) -> str:
        return "1.0"

    def retrieve(self, arguments: RetrieverArguments) -> List[RetrieverResult]:
        """
        Retrieve documents based on query

        Args:
            arguments: Validated RetrieverArguments containing at minimum:
                - question: str - The search query
                - role_reference: str - Role reference to filter on (required)

        Returns:
            List[RetrieverResult]: List of retrieved documents with similarity scores

        Raises:
            SQLAlchemyError: On database errors (the session is rolled back
                before re-raising).
        """
        try:
            question = arguments.question

            # Check if role_reference is provided; without it the search
            # cannot be scoped to a role, so return no results rather than
            # query the whole catalog.
            role_reference = getattr(arguments, 'role_reference', None)
            if not role_reference:
                current_app.logger.warning(f'No role_reference provided for TRAICIE_ROLE_DEFINITION_BY_ROLE_IDENTIFICATION retriever')
                return []

            # Get query embedding
            query_embedding = self.embedding_model.embed_query(question)

            # Get the appropriate embedding database model
            # (per-model chunk table class supplied by BaseRetriever).
            db_class = self.embedding_model_class

            # Get current date for validity checks
            current_date = dt.now(tz=tz.utc).date()

            # Create subquery for latest versions: the highest version id per
            # document, restricted to versions tagged with this role_reference.
            # NOTE(review): assumes catalog_properties is a PostgreSQL JSON(B)
            # column — .astext is Postgres-specific; confirm the backend.
            subquery = (
                db.session.query(
                    DocumentVersion.doc_id,
                    func.max(DocumentVersion.id).label('latest_version_id')
                )
                .filter(DocumentVersion.catalog_properties['role_reference'].astext == role_reference)
                .group_by(DocumentVersion.doc_id)
                .subquery()
            )

            # Tunable retrieval parameters, with defaults when not configured.
            similarity_threshold = self.retriever.configuration.get('es_similarity_threshold', 0.3)
            k = self.retriever.configuration.get('es_k', 8)

            # Main query: join chunks to their latest role-matching version,
            # keep only currently-valid documents of this catalog, and rank by
            # cosine similarity (1 - cosine distance) above the threshold.
            query_obj = (
                db.session.query(
                    db_class,
                    DocumentVersion.url,
                    (1 - db_class.embedding.cosine_distance(query_embedding)).label('similarity')
                )
                .join(DocumentVersion, db_class.doc_vers_id == DocumentVersion.id)
                .join(Document, DocumentVersion.doc_id == Document.id)
                .join(subquery, DocumentVersion.id == subquery.c.latest_version_id)
                .filter(
                    # NULL validity bounds are treated as open-ended.
                    or_(Document.valid_from.is_(None), func.date(Document.valid_from) <= current_date),
                    or_(Document.valid_to.is_(None), func.date(Document.valid_to) >= current_date),
                    (1 - db_class.embedding.cosine_distance(query_embedding)) > similarity_threshold,
                    Document.catalog_id == self.catalog_id
                )
                .order_by(desc('similarity'))
                .limit(k)
            )

            results = query_obj.all()

            # Transform results into standard format
            processed_results = []
            for doc, url, similarity in results:
                # Parse user_metadata to ensure it's a dictionary
                user_metadata = self._parse_metadata(doc.document_version.user_metadata)

                processed_results.append(
                    RetrieverResult(
                        id=doc.id,
                        chunk=doc.chunk,
                        similarity=float(similarity),
                        metadata=RetrieverMetadata(
                            document_id=doc.document_version.doc_id,
                            version_id=doc.document_version.id,
                            document_name=doc.document_version.document.name,
                            url=url or "",
                            user_metadata=user_metadata,
                        )
                    )
                )

            # Log the retrieval (tuning mode only), including the fully
            # rendered SQL so the query can be replayed when debugging.
            if self.tuning:
                compiled_query = str(query_obj.statement.compile(
                    compile_kwargs={"literal_binds": True}  # This will include the actual values in the SQL
                ))
                self.log_tuning('retrieve', {
                    "arguments": arguments.model_dump(),
                    "similarity_threshold": similarity_threshold,
                    "k": k,
                    "query": compiled_query,
                    "Raw Results": str(results),
                    "Processed Results": [r.model_dump() for r in processed_results],
                })

            return processed_results

        except SQLAlchemyError as e:
            # Roll back so the session stays usable for the caller.
            current_app.logger.error(f'Error in RAG retrieval: {e}')
            db.session.rollback()
            raise
        except Exception as e:
            current_app.logger.error(f'Unexpected error in RAG retrieval: {e}')
            raise

View File

@@ -77,7 +77,7 @@ class EveAICrewAICrew(Crew):
def __init__(self, specialist, name: str, **kwargs): def __init__(self, specialist, name: str, **kwargs):
if specialist.tuning: if specialist.tuning:
log_file = f"logs/crewai/{specialist.session_id}_{specialist.task_id}.txt" kwargs['output_log_file'] = f"/app/logs/crew_{name}.txt"
super().__init__(**kwargs) super().__init__(**kwargs)
self.specialist = specialist self.specialist = specialist
@@ -106,7 +106,7 @@ class EveAICrewAICrew(Crew):
class EveAICrewAIFlow(Flow): class EveAICrewAIFlow(Flow):
specialist: Any = Field(default=None, exclude=True) specialist: Any = Field(default=None, exclude=True)
name: str = Field(default=None, exclude=True) # name: str = Field(default=None, exclude=True)
model_config = ConfigDict(arbitrary_types_allowed=True) model_config = ConfigDict(arbitrary_types_allowed=True)
def __init__(self, specialist, name: str, **kwargs): def __init__(self, specialist, name: str, **kwargs):

View File

@@ -67,6 +67,10 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
# Format history for the prompt # Format history for the prompt
self._formatted_history = self._generate_formatted_history() self._formatted_history = self._generate_formatted_history()
self.arguments = None
self.formatted_context = None
self.citations = None
@property @property
def formatted_history(self) -> str: def formatted_history(self) -> str:
if not self._formatted_history: if not self._formatted_history:
@@ -75,18 +79,20 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
def _generate_formatted_history(self) -> str: def _generate_formatted_history(self) -> str:
"""Generate the formatted history string from cached session interactions.""" """Generate the formatted history string from cached session interactions."""
current_app.logger.debug(f"Generating formatted history for {self.session_id}")
current_app.logger.debug(f"Cached session interactions: {len(self._cached_session.interactions)}")
return "\n\n".join([ return "\n\n".join([
"\n\n".join([ "\n\n".join([
f"HUMAN:\n" f"HUMAN:\n"
f"{interaction.specialist_arguments['question']}" f"{interaction.specialist_arguments['question']}"
if interaction.specialist_arguments.get('question') else "", if interaction.specialist_arguments.get('question') and interaction.specialist_arguments.get('question') != "Initialize" else "",
f"{interaction.specialist_arguments.get('form_values')}" f"{interaction.specialist_arguments.get('form_values')}"
if interaction.specialist_arguments.get('form_values') else "", if interaction.specialist_arguments.get('form_values') else "",
f"AI:\n{interaction.specialist_results['answer']}" f"AI:\n{interaction.specialist_results['answer']}"
if interaction.specialist_results.get('answer') else "" if interaction.specialist_results.get('answer') else ""
]).strip() ]).strip()
for interaction in self._cached_session.interactions for interaction in self._cached_session.interactions
if interaction.specialist_arguments.get('question') != "Initialize"
]) ])
def _add_task_agent(self, task_name: str, agent_name: str): def _add_task_agent(self, task_name: str, agent_name: str):
@@ -347,6 +353,7 @@ class CrewAIBaseSpecialistExecutor(BaseSpecialistExecutor):
raise NotImplementedError raise NotImplementedError
def execute_specialist(self, arguments: SpecialistArguments) -> SpecialistResult: def execute_specialist(self, arguments: SpecialistArguments) -> SpecialistResult:
self.log_tuning("*****************************************************************************************", {})
if self.retrievers: if self.retrievers:
formatted_context = None formatted_context = None
citations = None citations = None

View File

@@ -19,7 +19,7 @@ from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew,
from common.services.interaction.specialist_services import SpecialistServices from common.services.interaction.specialist_services import SpecialistServices
NEW_SPECIALIST_TYPE = "TRAICIE_SELECTION_SPECIALIST" NEW_SPECIALIST_TYPE = "TRAICIE_SELECTION_SPECIALIST"
NEW_SPECIALIST_TYPE_VERSION = "1.4" NEW_SPECIALIST_TYPE_VERSION = "1.5"
class SpecialistExecutor(CrewAIBaseSpecialistExecutor): class SpecialistExecutor(CrewAIBaseSpecialistExecutor):

View File

@@ -24,16 +24,56 @@ from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew,
from eveai_chat_workers.specialists.crewai_base_specialist import CrewAIBaseSpecialistExecutor from eveai_chat_workers.specialists.crewai_base_specialist import CrewAIBaseSpecialistExecutor
from eveai_chat_workers.specialists.specialist_typing import SpecialistResult, SpecialistArguments from eveai_chat_workers.specialists.specialist_typing import SpecialistResult, SpecialistArguments
INITIALISATION_MESSAGE = "Thank you for showing your interest! Let's start the selection process by asking you a few important questions." INITIALISATION_MESSAGES = [
START_SELECTION_QUESTION = "Do you want to start the selection procedure?" "Great! Let's see if this job might be a match for you by going through a few questions.",
"Nice to hear that! Ill start with a first question to kick things off.",
"Good to know! Lets begin with the first question.",
"Thanks for your reply. Let's get started with a few short questions.",
"Excellent! Here's a first question to explore your fit with the role.",
"Glad to hear that. Let's start with the first question.",
"Appreciate your response! Ill now ask you the first question.",
"Awesome! Let's begin with a few questions to learn more about you.",
"Perfect, thank you. Let's start the matching process with a first question.",
"Thanks for sharing that. Ready for the first question?"
]
START_SELECTION_QUESTIONS = [
"Shall we see if this job could be a good fit for you?",
"Shall we go through a few questions to explore if there's a potential match?",
"May I ask you a first question?",
"Are you interested in applying for this position?",
"Would you like to take the next step and answer a few short questions?",
"Shall we begin the application process together?",
"Would you like to start the matching process to see if this role suits your preferences?",
"Lets explore if this opportunity aligns with what you're looking for — ready for a few questions?",
"Would you be open to answering a few questions to learn more about the role and your fit?",
"Would you like to continue and start the first part of the application journey?"
]
INSUFFICIENT_INFORMATION_MESSAGE = ( INSUFFICIENT_INFORMATION_MESSAGE = (
"We do not have the necessary information to provide you with the requested answers. " "We do not have the necessary information to provide you with the requested answers. "
"Please accept our apologies. You can ask other questions or proceed with the " "Please accept our apologies. You can ask other questions or proceed with the "
"selection process.") "selection process.")
KO_CRITERIA_NOT_MET_MESSAGE = ("Thank you for answering our questions! We processed your answers. Unfortunately, you do" KO_CRITERIA_NOT_MET_MESSAGES = [
"not comply with the minimum requirements for this job. Therefor, we stop this" "Thank you for your answers. Based on your responses, we won't be moving forward with this particular role. We do encourage you to keep an eye on our website for future opportunities.",
"selection procedure") "We appreciate the time you took to answer our questions. At this point, we wont be proceeding with your application, but feel free to check our website regularly for new vacancies.",
KO_CRITERIA_MET_MESSAGE = "We processed your answers with a positive result." "Thanks for your input. While were not continuing with your application for this role, wed be happy to welcome your interest again in the future — new opportunities are posted regularly on our site.",
"Thank you for participating. Although this role doesnt seem to be the right match right now, we invite you to stay connected and check back for other opportunities.",
"We truly appreciate your time and effort. Unfortunately, we wont be progressing with this application, but we encourage you to visit our website again for future job openings.",
"Thanks so much for answering our questions. This role may not be the right fit, but wed love for you to consider applying again when new positions become available.",
"We value your interest in this position. While we wont be moving forward in this case, we warmly invite you to explore other roles with us in the future.",
"Your input has been very helpful. Although we're not proceeding at this time, we thank you for your interest and hope to see you again for other opportunities.",
"Thank you for taking part in the process. We wont continue with your application for this role, but we invite you to stay informed about future openings through our website."
]
KO_CRITERIA_MET_MESSAGES = [
"Thank you for your answers. They correspond to some key elements of the role. Would you be open to sharing your contact details so we can continue the selection process?",
"We appreciate your input. Based on your answers, we'd like to continue the conversation. Could you share your contact information with us?",
"Thanks for your replies. To proceed with the application process, may we ask you to provide your contact details?",
"Your answers help us better understand your background. If you're open to it, can share your contact info so we can follow up?",
"Thank you for taking the time to answer these questions. If you'd like to continue, could we have your contact information?",
"Your responses give us a good first impression. In order to move forward with the process, could you share your contact details?",
"Weve reviewed your answers with interest. To take the next step, would you be willing to share your contact information?",
"Your input has been recorded. If youre comfortable doing so, will you please leave your contact information so we can reach out for the next steps?",
"Wed like to keep in touch regarding the next phases of the selection. Could you provide your contact details for further communication?"
]
KO_CRITERIA_NEXT_MESSAGES = [ KO_CRITERIA_NEXT_MESSAGES = [
"Thank you for your answer. Here's a next question.", "Thank you for your answer. Here's a next question.",
"Your answer fits our needs. We have yet another question to ask you.", "Your answer fits our needs. We have yet another question to ask you.",
@@ -42,23 +82,32 @@ KO_CRITERIA_NEXT_MESSAGES = [
"Appreciate your reply! Here's the next one.", "Appreciate your reply! Here's the next one.",
"Thanks for the input. Lets move on to the next question.", "Thanks for the input. Lets move on to the next question.",
"Thats exactly what we needed to hear. Here comes the next question.", "Thats exactly what we needed to hear. Here comes the next question.",
"Looks promising! Lets continue with another quick check.", "Looks promising! Lets continue with another quick check."
"Thanks! Here's another point we'd like to clarify."
] ]
RQC_MESSAGE = "You are well suited for this job."
CONTACT_DATA_QUESTION = ("Are you willing to provide us with your contact data, so we can contact you to continue "
"the selection process?")
CONTACT_DATA_GUIDING_MESSAGE = ("Thank you for trusting your contact data with us. Below you find a form to help you " CONTACT_DATA_GUIDING_MESSAGE = ("Thank you for trusting your contact data with us. Below you find a form to help you "
"to provide us the necessary information.") "to provide us the necessary information.")
NO_CONTACT_DATA_QUESTION = ("We are sorry to hear that. The only way to proceed with the selection process is " NO_CONTACT_DATA_QUESTIONS = [
"to provide us with your contact data. Do you want to provide us with your contact data?" "That's a pity! In order to continue, we do need your contact details. Would you be willing to share them? ",
"if not, we thank you, and we'll end the selection process.") "We understand your hesitation. However, to proceed with the process, your contact information is required. Would you like to share it with us?",
"Unfortunately, we can only move forward if you provide your contact details. Would you still consider sharing them with us?",
"Its totally your choice, of course. But without your contact details, we cant proceed further. Would you be open to sharing them?",
"Wed love to keep going, but we can only do so if we have your contact details. Would you like to provide them now?",
"Your privacy matters, and we respect your decision. Just know that without your contact details, well need to end the process here. Still interested in moving forward?",
"Its a shame to stop here, but we do need your contact info to proceed. Would you like to share it so we can continue?"
]
CONTACT_DATA_PROCESSED_MESSAGE = "Thank you for allowing us to contact you." CONTACT_DATA_PROCESSED_MESSAGE = "Thank you for allowing us to contact you."
CONTACT_TIME_QUESTION = "When do you prefer us to contact you? You can select some options in the provided form" CONTACT_TIME_QUESTION = "When do you prefer us to contact you? You can select some options in the provided form"
NO_CONTACT_TIME_MESSAGE = ("We could not process your preferred contact time. Can you please provide us with your " CONTACT_TIME_PROCESSED_MESSAGES = [
"preferred contact time?") "Thank you! We've received all the information we need to continue with the selection process. We'll get in touch with you as soon as possible. If you have any questions in the meantime, don't hesitate to ask.",
CONTACT_TIME_PROCESSED_MESSAGE = ("We successfully processed your preferred contact time. We will contact you as soon " "Great, we have everything we need to proceed. We'll be in touch shortly. Don't hesitate to ask if anything comes up in the meantime.",
"as possible.") "Thanks for providing your details. We now have all the necessary information and will contact you soon. If you have any further questions, we're here to help.",
"Perfect, your information has been received. We'll move forward and get back to you as soon as we can. Feel free to reach out if you have any questions.",
"All set! Weve received everything needed to move forward. We'll contact you soon. In the meantime, feel free to ask us anything.",
"Thanks again! We've got everything we need to proceed. Expect to hear from us shortly. If anything is unclear, you're welcome to ask further questions.",
"Excellent, we now have all the information required to take the next steps. Well be in touch as soon as possible. If you have any questions, just let us know.",
"We appreciate your input. With all the needed details in place, well reach out shortly to continue the process. Questions are always welcome in the meantime.",
"Thank you for completing this step. We have all the information we need and will contact you as soon as we can. If you have questions, we're happy to assist."
]
NO_FURTHER_QUESTIONS_MESSAGE = "We do not process further questions." NO_FURTHER_QUESTIONS_MESSAGE = "We do not process further questions."
SUCCESSFUL_ENDING_MESSAGE = "Thank you for your application. We will contact you as soon as possible!" SUCCESSFUL_ENDING_MESSAGE = "Thank you for your application. We will contact you as soon as possible!"
@@ -78,6 +127,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
# Load the Tenant & set language # Load the Tenant & set language
self.tenant = Tenant.query.get_or_404(tenant_id) self.tenant = Tenant.query.get_or_404(tenant_id)
self.specialist_phase = "initial" self.specialist_phase = "initial"
self.previous_ai_question = None
@property @property
def type(self) -> str: def type(self) -> str:
@@ -94,6 +144,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
self._add_pydantic_output("rag_task", RAGOutput, "rag_output") self._add_pydantic_output("rag_task", RAGOutput, "rag_output")
def _config_state_result_relations(self): def _config_state_result_relations(self):
self._add_state_result_relation("ai_question")
self._add_state_result_relation("rag_output") self._add_state_result_relation("rag_output")
self._add_state_result_relation("ko_criteria_scores") self._add_state_result_relation("ko_criteria_scores")
self._add_state_result_relation("current_ko_criterium") self._add_state_result_relation("current_ko_criterium")
@@ -103,7 +154,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
def _instantiate_specialist(self): def _instantiate_specialist(self):
verbose = self.tuning verbose = self.tuning
rag_agents = [self.rag_agent] rag_agents = [self.rag_agent]
rag_tasks = [self.rag_task] rag_tasks = [self.rag_task]
self.rag_crew = EveAICrewAICrew( self.rag_crew = EveAICrewAICrew(
@@ -113,7 +163,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
tasks=rag_tasks, tasks=rag_tasks,
verbose=verbose, verbose=verbose,
) )
self.flow = SelectionFlow( self.flow = SelectionFlow(
self, self,
self.rag_crew, self.rag_crew,
@@ -126,7 +175,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
self.specialist_phase = "initial" self.specialist_phase = "initial"
else: else:
self.specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial') self.specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')
self.previous_ai_question = self._cached_session.interactions[-1].specialist_results.get('ai_question', '')
current_app.logger.debug(f"Current Specialist Phase: {self.specialist_phase}")
results = None results = None
match self.specialist_phase: match self.specialist_phase:
case "initial": case "initial":
@@ -166,10 +217,12 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
return self.execute_start_selection_procedure_state(arguments, formatted_context, citations, return self.execute_start_selection_procedure_state(arguments, formatted_context, citations,
welcome_message) welcome_message)
# We are in orientation mode, so we give a standard message, and move to rag state # We are in orientation mode, so we give a standard message, and move to rag state
start_selection_question = TranslationServices.translate(self.tenant_id, START_SELECTION_QUESTION, start_selection_question = TranslationServices.translate(self.tenant_id,
random.choice(START_SELECTION_QUESTIONS),
arguments.language) arguments.language)
self.flow.state.answer = f"{welcome_message}" self.flow.state.answer = f"{welcome_message}"
self.flow.state.phase = "rag" self.flow.state.phase = "rag"
self.flow.state.ai_question = welcome_message
results = SelectionResult.create_for_type(self.type, self.type_version) results = SelectionResult.create_for_type(self.type, self.type_version)
@@ -178,7 +231,8 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
def execute_start_selection_procedure_state(self, arguments: SpecialistArguments, formatted_context, citations, def execute_start_selection_procedure_state(self, arguments: SpecialistArguments, formatted_context, citations,
start_message=None) -> SpecialistResult: start_message=None) -> SpecialistResult:
initialisation_message = TranslationServices.translate(self.tenant_id, INITIALISATION_MESSAGE, initialisation_message = TranslationServices.translate(self.tenant_id,
random.choice(INITIALISATION_MESSAGES),
arguments.language) arguments.language)
if start_message: if start_message:
answer = f"{start_message}\n\n{initialisation_message}" answer = f"{start_message}\n\n{initialisation_message}"
@@ -254,14 +308,15 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
self.flow.state.ko_criteria_scores.append(score) self.flow.state.ko_criteria_scores.append(score)
if evaluation == "negative": if evaluation == "negative":
answer = TranslationServices.translate(self.tenant_id, KO_CRITERIA_NOT_MET_MESSAGE, arguments.language) answer = TranslationServices.translate(self.tenant_id,
random.choices(KO_CRITERIA_NOT_MET_MESSAGES),
arguments.language)
self.flow.state.answer = answer self.flow.state.answer = answer
self.flow.state.phase = "no_valid_candidate" self.flow.state.phase = "no_valid_candidate"
results = SelectionResult.create_for_type(self.type, self.type_version) results = SelectionResult.create_for_type(self.type, self.type_version)
else: else:
rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
next_idx = previous_idx + 1 next_idx = previous_idx + 1
if next_idx < len(ko_questions.ko_questions): # There's still a KO criterium to be evaluated if next_idx < len(ko_questions.ko_questions): # There's still a KO criterium to be evaluated
@@ -269,8 +324,6 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
ko_form = self._prepare_ko_question_form(ko_questions, next_ko_criterium.title, arguments.language) ko_form = self._prepare_ko_question_form(ko_questions, next_ko_criterium.title, arguments.language)
next_message = random.choice(KO_CRITERIA_NEXT_MESSAGES) next_message = random.choice(KO_CRITERIA_NEXT_MESSAGES)
answer = TranslationServices.translate(self.tenant_id, next_message, arguments.language) answer = TranslationServices.translate(self.tenant_id, next_message, arguments.language)
if rag_output:
answer = f"{rag_output.answer}\n\n{answer}"
self.flow.state.answer = answer self.flow.state.answer = answer
self.flow.state.form_request = ko_form self.flow.state.form_request = ko_form
@@ -278,13 +331,10 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
self.flow.state.current_ko_criterium_idx = next_idx self.flow.state.current_ko_criterium_idx = next_idx
self.flow.state.phase = "ko_question_evaluation" self.flow.state.phase = "ko_question_evaluation"
else: # All KO Criteria have been met else: # All KO Criteria have been met
answer = TranslationServices.translate(self.tenant_id, KO_CRITERIA_MET_MESSAGE, arguments.language) answer = TranslationServices.translate(self.tenant_id,
rag_output = self._check_and_execute_rag(arguments, formatted_context, citations) random.choice(KO_CRITERIA_MET_MESSAGES),
if rag_output: arguments.language)
answer = f"{answer}\n\n{rag_output.answer}" self.flow.state.ai_question = answer
answer = (f"{answer}\n\n"
f"{TranslationServices.translate(self.tenant_id, RQC_MESSAGE, arguments.language)} \n\n"
f"{TranslationServices.translate(self.tenant_id, CONTACT_DATA_QUESTION, arguments.language)}")
self.flow.state.answer = answer self.flow.state.answer = answer
self.flow.state.current_ko_criterium = "" self.flow.state.current_ko_criterium = ""
@@ -299,28 +349,25 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
-> SpecialistResult: -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist personal_contact_data_preparation started", {}) self.log_tuning("Traicie Selection Specialist personal_contact_data_preparation started", {})
if HumanAnswerServices.check_affirmative_answer(self.tenant_id, CONTACT_DATA_QUESTION, if HumanAnswerServices.check_affirmative_answer(self.tenant_id, self.previous_ai_question,
arguments.question, arguments.language): arguments.question, arguments.language):
contact_form = cache_manager.specialist_forms_config_cache.get_config("PERSONAL_CONTACT_FORM", "1.0") contact_form = cache_manager.specialist_forms_config_cache.get_config("MINIMAL_PERSONAL_CONTACT_FORM", "1.0")
contact_form = TranslationServices.translate_config(self.tenant_id, contact_form, "fields", contact_form = TranslationServices.translate_config(self.tenant_id, contact_form, "fields",
arguments.language) arguments.language)
guiding_message = TranslationServices.translate(self.tenant_id, CONTACT_DATA_GUIDING_MESSAGE, answer = TranslationServices.translate(self.tenant_id, CONTACT_DATA_GUIDING_MESSAGE,
arguments.language) arguments.language)
rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
if rag_output:
answer = f"{rag_output.answer}\n\n{guiding_message}"
else:
answer = guiding_message
self.flow.state.answer = answer self.flow.state.answer = answer
self.flow.state.form_request = contact_form self.flow.state.form_request = contact_form
self.flow.state.phase = "personal_contact_data_processing" self.flow.state.phase = "personal_contact_data_processing"
results = SelectionResult.create_for_type(self.type, self.type_version,) results = SelectionResult.create_for_type(self.type, self.type_version,)
else: else:
answer = TranslationServices.translate(self.tenant_id, NO_CONTACT_DATA_QUESTION, arguments.language) answer = TranslationServices.translate(self.tenant_id,
random.choice(NO_CONTACT_DATA_QUESTIONS),
arguments.language)
self.flow.state.answer = answer self.flow.state.answer = answer
self.flow.state.ai_question = answer
self.flow.state.phase = "personal_contact_data_preparation" self.flow.state.phase = "personal_contact_data_preparation"
results = SelectionResult.create_for_type(self.type, self.type_version,) results = SelectionResult.create_for_type(self.type, self.type_version,)
@@ -330,18 +377,16 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
def execute_personal_contact_data_processing(self, arguments: SpecialistArguments, formatted_context, citations) \ def execute_personal_contact_data_processing(self, arguments: SpecialistArguments, formatted_context, citations) \
-> SpecialistResult: -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist personal_contact_data_processing started", {}) self.log_tuning("Traicie Selection Specialist personal_contact_data_processing started", {})
contact_time_question = TranslationServices.translate(self.tenant_id, CONTACT_TIME_QUESTION, arguments.language)
answer = ( answer = (
f"{TranslationServices.translate(self.tenant_id, CONTACT_DATA_PROCESSED_MESSAGE, arguments.language)}\n" f"{TranslationServices.translate(self.tenant_id, CONTACT_DATA_PROCESSED_MESSAGE, arguments.language)} "
f"{TranslationServices.translate(self.tenant_id, CONTACT_TIME_QUESTION, arguments.language)}") f"{contact_time_question}")
time_pref_form = cache_manager.specialist_forms_config_cache.get_config("CONTACT_TIME_PREFERENCES_SIMPLE", "1.0") time_pref_form = cache_manager.specialist_forms_config_cache.get_config("CONTACT_TIME_PREFERENCES_SIMPLE", "1.0")
time_pref_form = TranslationServices.translate_config(self.tenant_id, time_pref_form, "fields", time_pref_form = TranslationServices.translate_config(self.tenant_id, time_pref_form, "fields",
arguments.language) arguments.language)
rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
if rag_output:
answer = f"{answer}\n\n{rag_output.answer}"
self.flow.state.answer = answer self.flow.state.answer = answer
self.flow.state.ai_question = contact_time_question
self.flow.state.phase = "contact_time_evaluation" self.flow.state.phase = "contact_time_evaluation"
self.flow.state.personal_contact_data = arguments.form_values self.flow.state.personal_contact_data = arguments.form_values
self.flow.state.form_request = time_pref_form self.flow.state.form_request = time_pref_form
@@ -361,11 +406,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
self.log_tuning("Traicie Selection Specialist contact_time_evaluation started", {}) self.log_tuning("Traicie Selection Specialist contact_time_evaluation started", {})
rag_output = self._check_and_execute_rag(arguments, formatted_context, citations) rag_output = self._check_and_execute_rag(arguments, formatted_context, citations)
message = TranslationServices.translate(self.tenant_id, CONTACT_TIME_PROCESSED_MESSAGE, arguments.language) answer = TranslationServices.translate(self.tenant_id,
random.choice(CONTACT_TIME_PROCESSED_MESSAGES),
answer = TranslationServices.translate(self.tenant_id, CONTACT_TIME_PROCESSED_MESSAGE, arguments.language) arguments.language)
if rag_output:
answer = f"{rag_output.answer}\n\n{message}"
self.flow.state.answer = answer self.flow.state.answer = answer
self.flow.state.phase = "candidate_selected" self.flow.state.phase = "candidate_selected"
@@ -387,8 +430,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
def execute_no_valid_candidate_state(self, arguments: SpecialistArguments, formatted_context, citations) \ def execute_no_valid_candidate_state(self, arguments: SpecialistArguments, formatted_context, citations) \
-> SpecialistResult: -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist no_valid_candidate started", {}) self.log_tuning("Traicie Selection Specialist no_valid_candidate started", {})
answer = (f"{TranslationServices.translate(self.tenant_id, KO_CRITERIA_NOT_MET_MESSAGE, arguments.language)}\n" answer = TranslationServices.translate(self.tenant_id,
f"{TranslationServices.translate(self.tenant_id, NO_FURTHER_QUESTIONS_MESSAGE, arguments.language)}\n") random.choice(KO_CRITERIA_NOT_MET_MESSAGES),
arguments.language)
self.flow.state.answer = answer self.flow.state.answer = answer
self.flow.state.phase = "no_valid_candidate" self.flow.state.phase = "no_valid_candidate"
@@ -411,13 +455,14 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
-> SpecialistResult: -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist rag_state started", {}) self.log_tuning("Traicie Selection Specialist rag_state started", {})
start_selection_question = TranslationServices.translate(self.tenant_id, START_SELECTION_QUESTION, start_selection_question = TranslationServices.translate(self.tenant_id,
random.choice(START_SELECTION_QUESTIONS),
arguments.language) arguments.language)
rag_output = None rag_output = None
if HumanAnswerServices.check_additional_information(self.tenant_id, if HumanAnswerServices.check_additional_information(self.tenant_id,
START_SELECTION_QUESTION, random.choice(START_SELECTION_QUESTIONS),
arguments.question, arguments.question,
arguments.language): arguments.language):
rag_output = self.execute_rag(arguments, formatted_context, citations) rag_output = self.execute_rag(arguments, formatted_context, citations)
@@ -427,7 +472,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
answer = "" answer = ""
if HumanAnswerServices.check_affirmative_answer(self.tenant_id, if HumanAnswerServices.check_affirmative_answer(self.tenant_id,
START_SELECTION_QUESTION, random.choice(START_SELECTION_QUESTIONS),
arguments.question, arguments.question,
arguments.language): arguments.language):
return self.execute_start_selection_procedure_state(arguments, formatted_context, citations, answer) return self.execute_start_selection_procedure_state(arguments, formatted_context, citations, answer)
@@ -468,9 +513,9 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
return rag_output return rag_output
def _check_and_execute_rag(self, arguments: SpecialistArguments, formatted_context, citations) -> RAGOutput: def _check_and_execute_rag(self, arguments: SpecialistArguments, formatted_context, citations) -> RAGOutput | None:
if HumanAnswerServices.check_additional_information(self.tenant_id, if HumanAnswerServices.check_additional_information(self.tenant_id,
START_SELECTION_QUESTION, self.previous_ai_question,
arguments.question, arguments.question,
arguments.language): arguments.language):
rag_output = self.execute_rag(arguments, formatted_context, citations) rag_output = self.execute_rag(arguments, formatted_context, citations)
@@ -610,6 +655,7 @@ class SelectionInput(BaseModel):
class SelectionFlowState(EveAIFlowState): class SelectionFlowState(EveAIFlowState):
"""Flow state for RAG specialist that automatically updates from task outputs""" """Flow state for RAG specialist that automatically updates from task outputs"""
input: Optional[SelectionInput] = None input: Optional[SelectionInput] = None
ai_question: Optional[str] = None
rag_output: Optional[RAGOutput] = None rag_output: Optional[RAGOutput] = None
current_ko_criterium: Optional[str] = None current_ko_criterium: Optional[str] = None
current_ko_criterium_idx: Optional[int] = None current_ko_criterium_idx: Optional[int] = None
@@ -620,6 +666,7 @@ class SelectionFlowState(EveAIFlowState):
class SelectionResult(SpecialistResult): class SelectionResult(SpecialistResult):
ai_question: Optional[str] = None
rag_output: Optional[RAGOutput] = Field(None, alias="rag_output") rag_output: Optional[RAGOutput] = Field(None, alias="rag_output")
ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores") ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores")
personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data") personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data")
@@ -643,6 +690,9 @@ class SelectionFlow(EveAICrewAIFlow[SelectionFlowState]):
@listen(process_inputs) @listen(process_inputs)
async def execute_rag(self): async def execute_rag(self):
inputs = self.state.input.model_dump() inputs = self.state.input.model_dump()
current_app.logger.debug(f"execute_rag inputs: ---------------------------------------------------------------"
f" {inputs}")
try: try:
crew_output = await self.rag_crew.kickoff_async(inputs=inputs) crew_output = await self.rag_crew.kickoff_async(inputs=inputs)
self.specialist_executor.log_tuning("RAG Crew Output", crew_output.model_dump()) self.specialist_executor.log_tuning("RAG Crew Output", crew_output.model_dump())
@@ -658,6 +708,7 @@ class SelectionFlow(EveAICrewAIFlow[SelectionFlowState]):
raise e raise e
async def kickoff_async(self, inputs=None): async def kickoff_async(self, inputs=None):
current_app.logger.debug(f"kickoff SelectionFlow: ---------------------------------------------------------------")
self.state.input = SelectionInput.model_validate(inputs) self.state.input = SelectionInput.model_validate(inputs)
result = await super().kickoff_async(inputs) result = await super().kickoff_async(inputs)
return self.state return self.state

View File

@@ -0,0 +1,793 @@
import json
import random
from datetime import date
from typing import Optional, List, Dict, Any
from crewai.flow.flow import start, listen, router
from flask import current_app
from pydantic import BaseModel, Field, EmailStr
from common.extensions import cache_manager, db, minio_client
from common.models.interaction import EveAIAsset
from common.models.user import Tenant
from common.services.interaction.capsule_services import CapsuleServices
from common.services.utils.human_answer_services import HumanAnswerServices
from common.services.utils.translation_services import TranslationServices
from common.utils.business_event_context import current_event
from common.utils.eveai_exceptions import EveAISpecialistExecutionError
from eveai_chat_workers.definitions.language_level.language_level_v1_0 import LANGUAGE_LEVEL, get_language_level_context
from eveai_chat_workers.definitions.tone_of_voice.tone_of_voice_v1_0 import TONE_OF_VOICE, get_tone_of_voice_context
from eveai_chat_workers.outputs.globals.basic_types.list_item import ListItem
from eveai_chat_workers.outputs.globals.rag.rag_v1_0 import RAGOutput
from eveai_chat_workers.outputs.traicie.affirmative_answer.affirmative_answer_v1_0 import TraicieAffirmativeAnswerOutput
from eveai_chat_workers.outputs.traicie.interview_mode.interview_mode_v1_0 import TraicieInterviewModeOutput
from eveai_chat_workers.outputs.traicie.knockout_questions.knockout_questions_v1_0 import KOQuestion, KOQuestions
from eveai_chat_workers.specialists.crewai_base_classes import EveAICrewAICrew, EveAICrewAIFlow, EveAIFlowState
from eveai_chat_workers.specialists.crewai_base_specialist import CrewAIBaseSpecialistExecutor
from eveai_chat_workers.specialists.specialist_typing import SpecialistResult, SpecialistArguments
# Randomised acknowledgements shown when the KO-question procedure kicks off
# (picked with random.choice in execute_start_selection_procedure_state).
INITIALISATION_MESSAGES = [
    "Great! Let's see if this job might be a match for you by going through a few questions.",
    "Nice to hear that! Ill start with a first question to kick things off.",
    "Good to know! Lets begin with the first question.",
    "Thanks for your reply. Let's get started with a few short questions.",
    "Excellent! Here's a first question to explore your fit with the role.",
    "Glad to hear that. Let's start with the first question.",
    "Appreciate your response! Ill now ask you the first question.",
    "Awesome! Let's begin with a few questions to learn more about you.",
    "Perfect, thank you. Let's start the matching process with a first question.",
    "Thanks for sharing that. Ready for the first question?"
]

# Randomised invitations asking the candidate to start the selection process
# (used in orientation mode / the rag phase).
START_SELECTION_QUESTIONS = [
    "Shall we see if this job could be a good fit for you?",
    "Shall we go through a few questions to explore if there's a potential match?",
    "May I ask you a first question?",
    "Are you interested in applying for this position?",
    "Would you like to take the next step and answer a few short questions?",
    "Shall we begin the application process together?",
    "Would you like to start the matching process to see if this role suits your preferences?",
    "Lets explore if this opportunity aligns with what you're looking for — ready for a few questions?",
    "Would you be open to answering a few questions to learn more about the role and your fit?",
    "Would you like to continue and start the first part of the application journey?"
]

# Randomised follow-ups used after the candidate declined to start the process.
TRY_TO_START_SELECTION_QUESTIONS = [
    "That's a pity — we can only move forward if we start the selection process. Would you like to begin now?",
    "We understand, though its worth mentioning that the only way to continue is to start the procedure. Shall we get started after all?",
    "Too bad! To proceed, we do need to go through the selection steps. Would you be open to starting now?",
    "Were sorry to hear that. The next steps are only possible if we start the selection process. Would you reconsider and allow us to begin?",
    "That's unfortunate — continuing isnt possible without starting the process. Are you sure you dont want to begin now?",
    "Thanks for your response. Just so you know: we can only continue if we go through the initial questions. Shall we start anyway?",
    "We respect your answer, of course. Still, wed love to continue — but thats only possible if we begin the selection process. Can we do that now?",
    "We get it — but to move forward, the selection process does need to be started. Would you like to give it a go?",
    "Understood! However, we can't proceed without initiating the process. Would you like to start it now after all?",
    "We appreciate your honesty. Just to clarify: the process only continues if we begin the selection. Shall we go ahead?"
]
# Randomised fallback answers when RAG cannot answer a candidate question.
INSUFFICIENT_INFORMATION_MESSAGES = [
    "I'm afraid I don't have enough information to answer that properly. Feel free to ask something else!",
    "There isnt enough data available right now to give you a clear answer. You're welcome to rephrase or ask a different question.",
    "Sorry, I can't provide a complete answer based on the current information. Would you like to try asking something else?",
    "I dont have enough details to give you a confident answer. You can always ask another question if youd like.",
    "Unfortunately, I cant answer that accurately with the information at hand. Please feel free to ask something else.",
    "Thats a great question, but I currently lack the necessary information to respond properly. Want to ask something different?",
    "I wish I could help more, but the data I have isn't sufficient to answer this. Youre welcome to explore other questions.",
    "Theres not enough context for me to provide a good answer. Dont hesitate to ask another question if you'd like!",
    "I'm not able to give a definitive answer to that. Perhaps try a different question or angle?",
    "Thanks for your question. At the moment, I cant give a solid answer — but I'm here if you want to ask something else!"
]

# Randomised rejection messages shown when a knock-out criterium is not met.
KO_CRITERIA_NOT_MET_MESSAGES = [
    "Thank you for your answers. Based on your responses, we won't be moving forward with this particular role. We do encourage you to keep an eye on our website for future opportunities.",
    "We appreciate the time you took to answer our questions. At this point, we wont be proceeding with your application, but feel free to check our website regularly for new vacancies.",
    "Thanks for your input. While were not continuing with your application for this role, wed be happy to welcome your interest again in the future — new opportunities are posted regularly on our site.",
    "Thank you for participating. Although this role doesnt seem to be the right match right now, we invite you to stay connected and check back for other opportunities.",
    "We truly appreciate your time and effort. Unfortunately, we wont be progressing with this application, but we encourage you to visit our website again for future job openings.",
    "Thanks so much for answering our questions. This role may not be the right fit, but wed love for you to consider applying again when new positions become available.",
    "We value your interest in this position. While we wont be moving forward in this case, we warmly invite you to explore other roles with us in the future.",
    "Your input has been very helpful. Although we're not proceeding at this time, we thank you for your interest and hope to see you again for other opportunities.",
    "Thank you for taking part in the process. We wont continue with your application for this role, but we invite you to stay informed about future openings through our website."
]

# Randomised messages shown when all knock-out criteria have been met and
# contact details are requested.
# NOTE(review): the fourth entry reads "can share" — apparently missing "you".
# These strings double as translation-lookup sources, so the text is left
# untouched here; confirm before correcting.
KO_CRITERIA_MET_MESSAGES = [
    "Thank you for your answers. They correspond to some key elements of the role. Would you be open to sharing your contact details so we can continue the selection process?",
    "We appreciate your input. Based on your answers, we'd like to continue the conversation. Could you share your contact information with us?",
    "Thanks for your replies. To proceed with the application process, may we ask you to provide your contact details?",
    "Your answers help us better understand your background. If you're open to it, can share your contact info so we can follow up?",
    "Thank you for taking the time to answer these questions. If you'd like to continue, could we have your contact information?",
    "Your responses give us a good first impression. In order to move forward with the process, could you share your contact details?",
    "Weve reviewed your answers with interest. To take the next step, would you be willing to share your contact information?",
    "Your input has been recorded. If youre comfortable doing so, will you please leave your contact information so we can reach out for the next steps?",
    "Wed like to keep in touch regarding the next phases of the selection. Could you provide your contact details for further communication?"
]

# Randomised bridges between consecutive knock-out questions.
KO_CRITERIA_NEXT_MESSAGES = [
    "Thank you for your answer. Here's a next question.",
    "Your answer fits our needs. We have yet another question to ask you.",
    "Positive this far! Here's a follow-up question.",
    "Great, thats just what we were hoping for. Lets continue with another question.",
    "Appreciate your reply! Here's the next one.",
    "Thanks for the input. Lets move on to the next question.",
    "Thats exactly what we needed to hear. Here comes the next question.",
    "Looks promising! Lets continue with another quick check."
]
# Shown together with the contact-details form.
CONTACT_DATA_GUIDING_MESSAGE = ("Thank you for trusting your contact data with us. Below you find a form to help you "
                                "to provide us the necessary information.")

# Randomised nudges when the candidate declined to share contact details.
NO_CONTACT_DATA_QUESTIONS = [
    "That's a pity! In order to continue, we do need your contact details. Would you be willing to share them? ",
    "We understand your hesitation. However, to proceed with the process, your contact information is required. Would you like to share it with us?",
    "Unfortunately, we can only move forward if you provide your contact details. Would you still consider sharing them with us?",
    "Its totally your choice, of course. But without your contact details, we cant proceed further. Would you be open to sharing them?",
    "Wed love to keep going, but we can only do so if we have your contact details. Would you like to provide them now?",
    "Your privacy matters, and we respect your decision. Just know that without your contact details, well need to end the process here. Still interested in moving forward?",
    "Its a shame to stop here, but we do need your contact info to proceed. Would you like to share it so we can continue?"
]

# Randomised requests for contact details.
CONTACT_DATA_QUESTIONS = [
    "Could you please share your contact details so we can reach out to you for the next steps in the selection process?",
    "Would you be willing to provide your contact information so we can continue with your application?",
    "Can you share a way for us to contact you as we move forward with the selection process?",
    "May we have your contact details so we can follow up with the next steps?",
    "Would you mind sharing your contact information to proceed with the selection?",
    "Can you provide your email address or phone number so we can get in touch?",
    "Shall we continue? If so, could you let us know how we can best reach you?",
    "To move forward, may we contact you? If yes, could you share your details?",
    "Are you comfortable sharing your contact information so we can follow up?",
    "Would you like to continue the process by providing your contact details?"
]

# Acknowledgement after contact details were submitted.
CONTACT_DATA_PROCESSED_MESSAGE = "Thank you for allowing us to contact you."
# Prompt accompanying the contact-time-preferences form.
CONTACT_TIME_QUESTION = "When do you prefer us to contact you? You can select some options in the provided form"

# Randomised wrap-up messages after contact-time preferences were processed.
CONTACT_TIME_PROCESSED_MESSAGES = [
    "Thank you! We've received all the information we need to continue with the selection process. We'll get in touch with you as soon as possible. If you have any questions in the meantime, don't hesitate to ask.",
    "Great, we have everything we need to proceed. We'll be in touch shortly. Don't hesitate to ask if anything comes up in the meantime.",
    "Thanks for providing your details. We now have all the necessary information and will contact you soon. If you have any further questions, we're here to help.",
    "Perfect, your information has been received. We'll move forward and get back to you as soon as we can. Feel free to reach out if you have any questions.",
    "All set! Weve received everything needed to move forward. We'll contact you soon. In the meantime, feel free to ask us anything.",
    "Thanks again! We've got everything we need to proceed. Expect to hear from us shortly. If anything is unclear, you're welcome to ask further questions.",
    "Excellent, we now have all the information required to take the next steps. Well be in touch as soon as possible. If you have any questions, just let us know.",
    "We appreciate your input. With all the needed details in place, well reach out shortly to continue the process. Questions are always welcome in the meantime.",
    "Thank you for completing this step. We have all the information we need and will contact you as soon as we can. If you have questions, we're happy to assist."
]

# Shown when a rejected candidate keeps interacting.
NO_FURTHER_QUESTIONS_MESSAGE = "We do not process further questions."

# Randomised closing messages for a successfully completed application.
SUCCESSFUL_ENDING_MESSAGES = [
    "Thank you for your application! We'll contact you as soon as possible. If you have any questions in the meantime, dont hesitate to reach out.",
    "We appreciate your interest and the information youve shared. We'll be in touch shortly. Feel free to contact us if anything comes up.",
    "Thanks again for your application. Well get back to you soon. In the meantime, were happy to answer any questions you may have.",
    "Your application has been received. Well reach out to you as soon as we can. If you need anything in the meantime, just let us know.",
    "Thank you for completing the first steps. Well follow up as quickly as possible. If you have further questions, we're here to help.",
    "Thanks for taking the time to apply! Well contact you shortly. Let us know if you have any questions or need additional information.",
    "Weve received everything we need for now — thank you! Well be in touch soon. Dont hesitate to ask if somethings unclear.",
    "Were looking forward to speaking with you. Thanks again for your application, and feel free to reach out if you need anything.",
    "Thanks! Well contact you soon to discuss the next steps. In the meantime, were happy to answer any further questions.",
    "Your application is complete — thank you! Well be reaching out shortly. If youd like to ask anything in the meantime, were available."
]
class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
"""
type: TRAICIE_SELECTION_SPECIALIST
type_version: 1.1
Traicie Selection Specialist Executor class
"""
    def __init__(self, tenant_id, specialist_id, session_id, task_id, **kwargs):
        """Initialise the Traicie selection specialist executor.

        Args:
            tenant_id: Identifier of the tenant this executor runs for.
            specialist_id: Identifier of the specialist configuration.
            session_id: Identifier of the chat session.
            task_id: Identifier of the task being executed.
            **kwargs: Accepted for interface compatibility; not forwarded
                to the base constructor.
        """
        # Crews are assigned later by _instantiate_specialist; pre-declare the
        # attributes so they always exist on the instance.
        self.rag_crew = None
        self.determination_crew = None
        self.affirmative_answer_crew = None
        super().__init__(tenant_id, specialist_id, session_id, task_id)
        # Load the Tenant & set language
        # get_or_404 aborts the request when the tenant does not exist.
        self.tenant = Tenant.query.get_or_404(tenant_id)
        # Conversation-phase tracking; refreshed from the cached session in
        # execute() on every turn.
        self.specialist_phase = "initial"
        self.previous_ai_question = None
        self.previous_interview_phase = None
    @property
    def type(self) -> str:
        # Specialist type identifier used for configuration and result lookup.
        return "TRAICIE_SELECTION_SPECIALIST"
    @property
    def type_version(self) -> str:
        # Version of this specialist implementation.
        return "1.5"
    def _config_task_agents(self):
        # Map each CrewAI task to the agent that executes it; the recruiter
        # agent handles both interview-mode determination and affirmative-answer checks.
        self._add_task_agent("advanced_rag_task", "rag_agent")
        self._add_task_agent("traicie_determine_interview_mode_task", "traicie_recruiter_agent")
        self._add_task_agent("traicie_affirmative_answer_check_task", "traicie_recruiter_agent")
    def _config_pydantic_outputs(self):
        # Bind each task's structured output to a pydantic model and the
        # flow-state attribute it populates.
        self._add_pydantic_output("advanced_rag_task", RAGOutput, "rag_output")
        self._add_pydantic_output("traicie_determine_interview_mode_task", TraicieInterviewModeOutput, "interview_mode")
        self._add_pydantic_output("traicie_affirmative_answer_check_task", TraicieAffirmativeAnswerOutput, "affirmative_answer")
def _config_state_result_relations(self):
self._add_state_result_relation("ai_question")
self._add_state_result_relation("rag_output")
self._add_state_result_relation("ko_criteria_scores")
self._add_state_result_relation("current_ko_criterium")
self._add_state_result_relation("current_ko_criterium_idx")
self._add_state_result_relation("personal_contact_data")
self._add_state_result_relation("contact_time_prefs")
self._add_state_result_relation("interview_phase")
self._add_state_result_relation("interview_mode")
self._add_state_result_relation("affirmative_answer")
def _instantiate_specialist(self):
verbose = self.tuning
rag_agents = [self.rag_agent]
recruitment_agents = [self.traicie_recruiter_agent]
rag_tasks = [self.advanced_rag_task]
determination_tasks = [self.traicie_determine_interview_mode_task]
affirmative_answer_tasks = [self.traicie_affirmative_answer_check_task]
self.rag_crew = EveAICrewAICrew(
self,
"Advanced Rag Crew",
agents=rag_agents,
tasks=rag_tasks,
verbose=verbose,
)
self.determination_crew = EveAICrewAICrew(
self,
"Determination Crew",
agents=recruitment_agents,
tasks=determination_tasks,
verbose=verbose,
)
self.affirmative_answer_crew = EveAICrewAICrew(
self,
"Affirmative Answer Crew",
agents=recruitment_agents,
tasks=affirmative_answer_tasks,
verbose=verbose,
)
self.flow = SelectionFlow(
self,
self.rag_crew,
self.determination_crew,
self.affirmative_answer_crew
)
def execute(self, arguments: SpecialistArguments, formatted_context, citations) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist execution started", {})
self.arguments = arguments
self.formatted_context = formatted_context
self.citations = citations
self.log_tuning("Traicie Selection Specialist inputs", {
"Arguments": arguments.model_dump(),
"Formatted Context": formatted_context,
"Citations": citations,
"History": self._formatted_history
})
if not self._cached_session.interactions:
self.specialist_phase = "initial"
else:
self.specialist_phase = self._cached_session.interactions[-1].specialist_results.get('phase', 'initial')
self.previous_ai_question = self._cached_session.interactions[-1].specialist_results.get('ai_question', '')
self.previous_interview_phase = self._cached_session.interactions[-1].specialist_results.get('interview_phase', '')
results = None
match self.specialist_phase:
case "initial":
results = self.execute_initial_state()
case "start_selection_procedure":
results = self.execute_start_selection_procedure_state()
case "rag":
results = self.execute_rag_state()
case "ko_question_evaluation":
results = self.execute_ko_question_evaluation()
case "personal_contact_data_preparation":
results = self.execute_personal_contact_data_preparation()
case "personal_contact_data_processing":
results = self.execute_personal_contact_data_processing()
case "contact_time_evaluation":
results = self.execute_contact_time_evaluation_state()
case "no_valid_candidate":
results = self.execute_no_valid_candidate_state(arguments, formatted_context, citations)
self.log_tuning(f"Traicie Selection Specialist execution ended",
{"Results": results.model_dump() if results else "No info"})
return results
def execute_initial_state(self) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist initial_state execution started", {})
interaction_mode = self.arguments.interaction_mode
if not interaction_mode:
interaction_mode = "selection"
welcome_message = self.specialist.configuration.get("welcome_message", "Welcome to our selection process.")
welcome_message = TranslationServices.translate(self.tenant_id, welcome_message, self.arguments.language)
if interaction_mode == "selection":
return self.execute_start_selection_procedure_state(welcome_message)
# We are in orientation mode, so we give a standard message, and move to rag state
start_selection_question = TranslationServices.translate(self.tenant_id,
random.choice(START_SELECTION_QUESTIONS),
self.arguments.language)
self.flow.state.answer = f"{welcome_message}"
self.flow.state.phase = "rag"
self.flow.state.interview_phase = "start_selection_procedure"
self.flow.state.ai_question = welcome_message
results = SelectionResult.create_for_type(self.type, self.type_version)
return results
def execute_start_selection_procedure_state(self, start_message=None) -> SpecialistResult:
initialisation_message = TranslationServices.translate(self.tenant_id,
random.choice(INITIALISATION_MESSAGES),
self.arguments.language)
if start_message:
answer = f"{start_message}\n\n{initialisation_message}"
else:
answer = initialisation_message
ko_questions = self._get_ko_questions()
current_ko_criterium = ko_questions.ko_questions[0].title
current_ko_criterium_idx = 0
ko_form = self._prepare_ko_question_form(ko_questions, current_ko_criterium, self.arguments.language)
self.flow.state.current_ko_criterium = current_ko_criterium
self.flow.state.current_ko_criterium_idx = current_ko_criterium_idx
self.flow.state.ko_criteria_scores = []
self.flow.state.answer = answer
self.flow.state.phase = "ko_question_evaluation"
self.flow.state.form_request = ko_form
results = SelectionResult.create_for_type(self.type, self.type_version)
return results
def execute_ko_question_evaluation(self) -> SpecialistResult:
self.log_tuning("Traicie Selection Specialist ko_question_evaluation started", {})
# Check if the form has been returned (it should)
if not self.arguments.form_values:
raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id,
"No form values returned")
ko_questions = self._get_ko_questions()
previous_idx = self.flow.state.current_ko_criterium_idx
previous_ko_question = ko_questions.ko_questions[previous_idx]
# Evaluate KO Criteria
evaluation = "positive"
criterium, answer = next(iter(self.arguments.form_values.items()))
if TranslationServices.translate(self.tenant_id, previous_ko_question.answer_positive, self.arguments.language) != answer:
evaluation = "negative"
score = SelectionKOCriteriumScore(
criterium=criterium,
answer=answer,
score=1 if evaluation == "positive" else 0,
)
self.flow.state.ko_criteria_scores.append(score)
if evaluation == "negative":
answer = TranslationServices.translate(self.tenant_id,
random.choices(KO_CRITERIA_NOT_MET_MESSAGES),
self.arguments.language)
self.flow.state.answer = answer
self.flow.state.phase = "no_valid_candidate"
results = SelectionResult.create_for_type(self.type, self.type_version)
else:
next_idx = previous_idx + 1
if next_idx < len(ko_questions.ko_questions): # There's still a KO criterium to be evaluated
next_ko_criterium = ko_questions.ko_questions[next_idx]
ko_form = self._prepare_ko_question_form(ko_questions, next_ko_criterium.title, self.arguments.language)
next_message = random.choice(KO_CRITERIA_NEXT_MESSAGES)
answer = TranslationServices.translate(self.tenant_id, next_message, self.arguments.language)
self.flow.state.answer = answer
self.flow.state.form_request = ko_form
self.flow.state.current_ko_criterium = next_ko_criterium.title
self.flow.state.current_ko_criterium_idx = next_idx
self.flow.state.phase = "ko_question_evaluation"
else: # All KO Criteria have been met
answer = TranslationServices.translate(self.tenant_id,
random.choice(KO_CRITERIA_MET_MESSAGES),
self.arguments.language)
self.flow.state.ai_question = answer
self.flow.state.answer = answer
self.flow.state.current_ko_criterium = ""
self.flow.state.current_ko_criterium_idx = None
self.flow.state.phase = "rag"
self.flow.state.interview_phase = "personal_contact_data_preparation"
results = SelectionResult.create_for_type(self.type, self.type_version,)
return results
def execute_personal_contact_data_preparation(self) -> SpecialistResult:
    """Ask the candidate for minimal personal contact data via a translated form."""
    self.log_tuning("Traicie Selection Specialist personal_contact_data_preparation started", {})
    language = self.arguments.language
    # Load the minimal contact form and localise its field labels.
    form_config = cache_manager.specialist_forms_config_cache.get_config("MINIMAL_PERSONAL_CONTACT_FORM", "1.0")
    form_config = TranslationServices.translate_config(self.tenant_id, form_config, "fields", language)
    # Guide the candidate towards filling in the form.
    guiding_message = TranslationServices.translate(self.tenant_id, CONTACT_DATA_GUIDING_MESSAGE, language)
    state = self.flow.state
    state.answer = guiding_message
    state.form_request = form_config
    state.phase = "personal_contact_data_processing"
    return SelectionResult.create_for_type(self.type, self.type_version)
def execute_personal_contact_data_processing(self) -> SpecialistResult:
    """Store the submitted contact data, push it to the RQC capsule and ask for contact-time preferences."""
    self.log_tuning("Traicie Selection Specialist personal_contact_data_processing started", {})
    language = self.arguments.language
    tenant = self.tenant_id
    # Confirm processing and immediately ask when the candidate can be reached.
    contact_time_question = TranslationServices.translate(tenant, CONTACT_TIME_QUESTION, language)
    processed_message = TranslationServices.translate(tenant, CONTACT_DATA_PROCESSED_MESSAGE, language)
    answer = f"{processed_message} {contact_time_question}"
    # Localised form for picking preferred contact times.
    time_pref_form = cache_manager.specialist_forms_config_cache.get_config("CONTACT_TIME_PREFERENCES_SIMPLE", "1.0")
    time_pref_form = TranslationServices.translate_config(tenant, time_pref_form, "fields", language)
    state = self.flow.state
    state.answer = answer
    state.ai_question = contact_time_question
    state.interview_phase = "contact_time_evaluation"
    state.phase = "contact_time_evaluation"
    state.personal_contact_data = self.arguments.form_values
    state.form_request = time_pref_form
    # Share the data collected so far with the recruitment quality capsule.
    rqc_info = {
        "ko_criteria_scores": state.ko_criteria_scores,
        "personal_contact_data": state.personal_contact_data,
    }
    CapsuleServices.push_capsule_data(self._cached_session.id, "TRAICIE_RQC", "1.0", {}, rqc_info)
    return SelectionResult.create_for_type(self.type, self.type_version)
def execute_contact_time_evaluation_state(self) -> SpecialistResult:
    """Record the candidate's contact-time preferences and mark the candidate as selected."""
    self.log_tuning("Traicie Selection Specialist contact_time_evaluation started", {})
    confirmation = TranslationServices.translate(
        self.tenant_id,
        random.choice(CONTACT_TIME_PROCESSED_MESSAGES),
        self.arguments.language,
    )
    state = self.flow.state
    state.answer = confirmation
    state.phase = "rag"
    state.interview_phase = "candidate_selected"
    state.contact_time_prefs = self.arguments.form_values
    # Push the complete candidate dossier to the RQC capsule.
    rqc_info = {
        "ko_criteria_scores": state.ko_criteria_scores,
        "personal_contact_data": state.personal_contact_data,
        "contact_time_prefs": state.contact_time_prefs,
    }
    CapsuleServices.push_capsule_data(self._cached_session.id, "TRAICIE_RQC", "1.0", {}, rqc_info)
    return SelectionResult.create_for_type(self.type, self.type_version)
def execute_no_valid_candidate_state(self, arguments: SpecialistArguments, formatted_context, citations) \
        -> SpecialistResult:
    """Tell the candidate a KO criterium was not met; the flow stays in the terminal phase.

    `formatted_context` and `citations` are accepted for call-site compatibility
    but are not used by this state.
    """
    self.log_tuning("Traicie Selection Specialist no_valid_candidate started", {})
    rejection = TranslationServices.translate(
        self.tenant_id,
        random.choice(KO_CRITERIA_NOT_MET_MESSAGES),
        arguments.language,
    )
    state = self.flow.state
    state.answer = rejection
    state.phase = "no_valid_candidate"
    return SelectionResult.create_for_type(self.type, self.type_version)
def execute_rag_state(self) -> None | SpecialistResult:
    """Run the selection flow in conversational (RAG) mode.

    Retrieves context for the candidate's question, kicks off the CrewAI flow,
    then interprets the resulting state:
    - In "RAG" interview mode the flow answered a free question; the interview
      question for the current phase is appended to keep the conversation moving.
    - In "CHECK" mode the flow classified the candidate's reply; an affirmative
      answer advances to the next interview phase, otherwise we re-ask.

    Returns:
        SelectionResult for this specialist type, or the result of the next
        interview phase when an affirmative answer moves the interview forward.
    """
    self.log_tuning("Traicie Selection Specialist rag_state started", {})
    # Prepare & Execute Selection Flow
    formatted_context, citations = self._retrieve_context(self.arguments)
    self.flow.state.citations = citations
    tone_of_voice = self.specialist.configuration.get('tone_of_voice', 'Professional & Neutral')
    tone_of_voice_context = get_tone_of_voice_context(tone_of_voice)
    language_level = self.specialist.configuration.get('language_level', 'Standard')
    language_level_context = get_language_level_context(language_level)
    flow_inputs = {
        "language": self.arguments.language,
        "question": self.arguments.question,
        "context": formatted_context,
        "history": self.formatted_history,
        "name": self.specialist.configuration.get('name', ''),
        "tone_of_voice": tone_of_voice,
        "tone_of_voice_context": tone_of_voice_context,
        "language_level": language_level,
        "language_level_context": language_level_context,
    }
    # The flow writes its results onto the shared state object; the return value is not needed.
    self.flow.kickoff(inputs=flow_inputs)

    # Handle the results - stored in the state object
    if self.flow.state.interview_mode == "RAG":
        # In case of RAG mode, we get a rag_output
        if self.flow.state.rag_output:
            if self.flow.state.rag_output.insufficient_info:
                answer = TranslationServices.translate(self.tenant_id,
                                                       random.choice(INSUFFICIENT_INFORMATION_MESSAGES),
                                                       self.arguments.language)
            else:
                answer = self.flow.state.rag_output.answer
        else:
            current_app.logger.error("No RAG output found in the state object!")
            answer = TranslationServices.translate(self.tenant_id,
                                                   random.choice(INSUFFICIENT_INFORMATION_MESSAGES),
                                                   self.arguments.language)
        interview_question = self._get_question_for_interview_phase()
        # BUGFIX: use the fallback-aware `answer` computed above. Referencing
        # rag_output.answer directly crashed with AttributeError when rag_output
        # was missing and discarded the insufficient-information fallback message.
        self.flow.state.answer = f"{answer}\n{interview_question}"
        self.flow.state.phase = "rag"
        self.flow.state.interview_phase = self.previous_interview_phase
    else:  # self.flow.state.interview_mode == "CHECK"
        if self.previous_interview_phase == "candidate_selected":  # We stay in RAG mode
            interview_question = self._get_question_for_interview_phase()
            self.flow.state.answer = interview_question
            self.flow.state.phase = "rag"
            self.flow.state.interview_phase = "candidate_selected"
        else:
            if self.flow.state.affirmative_answer:
                return self._execute_next_interview_phase()
            else:
                self.flow.state.answer = self._respond_to_negative_answer()
                self.flow.state.phase = "rag"
                self.flow.state.interview_phase = self.previous_interview_phase
    results = SelectionResult.create_for_type(self.type, self.type_version)
    return results
def _get_ko_questions(self) -> KOQuestions:
    """Load the KO criteria questions asset configured for this specialist.

    Queries for the EveAIAsset of type TRAICIE_KO_CRITERIA_QUESTIONS (version
    1.0.0) whose configuration references this specialist's id, registers the
    asset's token counts as an ASSET interaction on the current event, then
    downloads the asset file from MinIO and parses it.

    Returns:
        KOQuestions: the parsed KO criteria questions.

    Raises:
        EveAISpecialistExecutionError: when no matching asset exists.
    """
    ko_questions_asset = db.session.query(EveAIAsset).filter(
        EveAIAsset.type == "TRAICIE_KO_CRITERIA_QUESTIONS",
        EveAIAsset.type_version == "1.0.0",
        EveAIAsset.configuration.is_not(None),
        EveAIAsset.configuration.has_key('specialist_id'),
        # JSON values are text; cast so the comparison is against the integer id.
        EveAIAsset.configuration['specialist_id'].astext.cast(db.Integer) == self.specialist_id
    ).first()
    if not ko_questions_asset:
        raise EveAISpecialistExecutionError(self.tenant_id, self.specialist_id, self.session_id,
                                            "No KO criteria questions found")

    # Register Asset Usage
    prompt_tokens = ko_questions_asset.prompt_tokens
    completion_tokens = ko_questions_asset.completion_tokens
    total_tokens = prompt_tokens + completion_tokens
    metrics = {
        'total_tokens': total_tokens,
        'prompt_tokens': prompt_tokens,
        'completion_tokens': completion_tokens,
        'time_elapsed': 0,
        'interaction_type': 'ASSET',
    }
    current_event.log_llm_metrics(metrics)

    ko_questions_data = minio_client.download_asset_file(self.tenant_id, ko_questions_asset.bucket_name,
                                                         ko_questions_asset.object_name)
    ko_questions = KOQuestions.from_json(ko_questions_data)
    return ko_questions
def _prepare_ko_question_form(self, ko_questions: KOQuestions, current_ko_criterium: str, language: str) \
        -> Dict[str, Any]:
    """Build and localise a single-question KO criteria form for the given criterium."""
    ko_question = ko_questions.get_by_title(current_ko_criterium)
    # One mandatory options field whose choices are the positive/negative answers.
    field_spec = {
        "name": ko_question.title,
        "description": ko_question.title,
        "context": ko_question.question,
        "type": "options",
        "required": True,
        "allowed_values": [ko_question.answer_positive, ko_question.answer_negative]
    }
    ko_form = {
        "type": "KO_CRITERIA_FORM",
        "version": "1.0.0",
        "name": f"Starter Question: {current_ko_criterium}",
        "icon": "verified",
        "fields": {ko_question.title: field_spec},
    }
    return TranslationServices.translate_config(self.tenant_id, ko_form, "fields", language)
def _get_question_for_interview_phase(self) -> str:
    """Pick a random follow-up question matching the previous interview phase."""
    questions_per_phase = {
        "start_selection_procedure": START_SELECTION_QUESTIONS,
        "personal_contact_data_preparation": CONTACT_DATA_QUESTIONS,
        "candidate_selected": SUCCESSFUL_ENDING_MESSAGES,
    }
    pool = questions_per_phase.get(self.previous_interview_phase)
    # Unknown phases yield None, matching the original match-statement fall-through.
    question = random.choice(pool) if pool else None
    return TranslationServices.translate(self.tenant_id, question, self.arguments.language)
def _respond_to_negative_answer(self) -> str:
    """Pick a gentle retry message after the candidate declined the previous phase."""
    question = None
    phase = self.previous_interview_phase
    if phase == "start_selection_procedure":
        question = random.choice(TRY_TO_START_SELECTION_QUESTIONS)
    elif phase == "personal_contact_data_preparation":
        question = random.choice(NO_CONTACT_DATA_QUESTIONS)
    return TranslationServices.translate(self.tenant_id, question, self.arguments.language)
def _execute_next_interview_phase(self) -> SpecialistResult | None:
    """Dispatch to the handler of the interview phase following an affirmative answer."""
    handlers = {
        "start_selection_procedure": self.execute_start_selection_procedure_state,
        "personal_contact_data_preparation": self.execute_personal_contact_data_preparation,
    }
    handler = handlers.get(self.previous_interview_phase)
    return handler() if handler else None
class SelectionKOCriteriumScore(BaseModel):
    """Score for a single knock-out criterium answered by the candidate."""
    criterium: Optional[str] = Field(None, alias="criterium")  # KO criterium title
    answer: Optional[str] = Field(None, alias="answer")  # the answer the candidate gave
    score: Optional[int] = Field(None, alias="score")  # 1 when the positive answer was given, else 0
class PersonalContactData(BaseModel):
    """Minimal contact details collected from a selected candidate."""
    name: str = Field(..., description="Your name", alias="name")
    # BUGFIX: description was "Your Name" — a copy-paste of the name field's label.
    email: EmailStr = Field(..., description="Your Email Address", alias="email")
    phone: str = Field(..., description="Your Phone Number", alias="phone")
    address: Optional[str] = Field(None, description="Your Address", alias="address")
    zip: Optional[str] = Field(None, description="Postal Code", alias="zip")
    city: Optional[str] = Field(None, description="City", alias="city")
    country: Optional[str] = Field(None, description="Country", alias="country")
    consent: bool = Field(..., description="Consent", alias="consent")  # processing consent flag, required
class ContactTimePreferences(BaseModel):
    """Time slots during which the candidate prefers to be contacted."""
    early: Optional[bool] = Field(None, description="Early", alias="early")
    late_morning: Optional[bool] = Field(None, description="Late Morning", alias="late_morning")
    afternoon: Optional[bool] = Field(None, description="Afternoon", alias="afternoon")
    evening: Optional[bool] = Field(None, description="Evening", alias="evening")
    other: Optional[str] = Field(None, description="Other", alias="other")  # free-text alternative preference
class SelectionInput(BaseModel):
    """Input payload handed to the selection flow on kickoff."""
    # RAG elements
    language: Optional[str] = Field(None, alias="language")
    question: Optional[str] = Field(None, alias="question")
    context: Optional[str] = Field(None, alias="context")
    citations: Optional[List[int]] = Field(None, alias="citations")
    history: Optional[str] = Field(None, alias="history")
    name: Optional[str] = Field(None, alias="name")
    # Selection elements
    region: Optional[str] = Field(None, alias="region")
    working_schedule: Optional[str] = Field(None, alias="working_schedule")
    # BUGFIX: alias was "vacancy_text" (copy-paste), which made start_date
    # unpopulatable under its own key; every other field aliases its own name.
    start_date: Optional[date] = Field(None, alias="start_date")
    interaction_mode: Optional[str] = Field(None, alias="interaction_mode")
    tone_of_voice: Optional[str] = Field(None, alias="tone_of_voice")
    tone_of_voice_context: Optional[str] = Field(None, alias="tone_of_voice_context")
    language_level: Optional[str] = Field(None, alias="language_level")
    language_level_context: Optional[str] = Field(None, alias="language_level_context")
    ko_criteria: Optional[List[Dict[str, str]]] = Field(None, alias="ko_criteria")
    field_values: Optional[Dict[str, Any]] = Field(None, alias="field_values")
class SelectionFlowState(EveAIFlowState):
    """Flow state for RAG specialist that automatically updates from task outputs"""
    input: Optional[SelectionInput] = None  # inputs the flow was kicked off with
    ai_question: Optional[str] = None  # last question the AI asked the candidate
    rag_output: Optional[RAGOutput] = None  # RAG crew result (RAG mode only)
    current_ko_criterium: Optional[str] = None  # title of the KO criterium under evaluation
    current_ko_criterium_idx: Optional[int] = None  # index of that criterium in the KO questions list
    ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = None
    personal_contact_data: Optional[PersonalContactData] = None
    contact_time_prefs: Optional[ContactTimePreferences] = None
    citations: Optional[List[Dict[str, Any]]] = None  # retrieval citations backing the answer
    interview_phase: Optional[str] = None  # e.g. "personal_contact_data_preparation", "candidate_selected"
    interview_mode: Optional[str] = None  # "RAG" or "CHECK", set by the determination crew
    affirmative_answer: Optional[bool] = None  # CHECK-mode classification outcome
class SelectionResult(SpecialistResult):
    """Result returned by the Traicie selection specialist after each step."""
    ai_question: Optional[str] = None  # question the AI asked in this step, if any
    rag_output: Optional[RAGOutput] = Field(None, alias="rag_output")
    ko_criteria_scores: Optional[List[SelectionKOCriteriumScore]] = Field(None, alias="ko_criteria_scores")
    personal_contact_data: Optional[PersonalContactData] = Field(None, alias="personal_contact_data")
    contact_time_prefs: Optional[ContactTimePreferences] = None
    interview_phase: Optional[str] = None  # interview phase reached after this step
    interview_mode: Optional[str] = None  # "RAG" or "CHECK"
    affirmative_answer: Optional[bool] = None  # CHECK-mode classification of the candidate's reply
class SelectionFlow(EveAICrewAIFlow[SelectionFlowState]):
    """CrewAI flow driving the selection conversation.

    A determination crew first classifies the interaction mode; "RAG" routes to
    the answer-generating crew, "CHECK" to the crew that decides whether the
    candidate's reply was affirmative. All results are written onto the shared
    SelectionFlowState, which kickoff_async returns.
    """

    def __init__(self,
                 specialist_executor: CrewAIBaseSpecialistExecutor,
                 rag_crew: EveAICrewAICrew,
                 determination_crew: EveAICrewAICrew,
                 affirmative_answer_crew: EveAICrewAICrew,
                 **kwargs):
        super().__init__(specialist_executor, "Selection Specialist Flow", **kwargs)
        self.specialist_executor = specialist_executor
        self.rag_crew = rag_crew
        self.determination_crew = determination_crew
        self.affirmative_answer_crew = affirmative_answer_crew
        self.exception_raised = False  # set when any crew raises, so callers can tell

    @start()
    def process_inputs(self):
        # Inputs are already validated onto the state in kickoff_async; nothing to do here.
        return ""

    @listen(process_inputs)
    async def execute_determination(self):
        """Classify the interaction mode for routing and store it on the state."""
        try:
            inputs = self.state.input.model_dump()
            crew_output = await self.determination_crew.kickoff_async(inputs=inputs)
            self.specialist_executor.log_tuning("Determination Crew Output", crew_output.model_dump())
            output_pydantic = crew_output.pydantic
            if not output_pydantic:
                # Fall back to parsing the raw JSON when no pydantic output is attached.
                raw_json = json.loads(crew_output.raw)
                output_pydantic = SelectionResult(**raw_json)
            self.state.interview_mode = output_pydantic.mode
            return output_pydantic
        except Exception as e:
            current_app.logger.error(f"Determination Crew Error: {e}")
            self.exception_raised = True
            raise e

    @router(execute_determination)
    def interview_mode_routing(self):
        # interview mode can be RAG or CHECK
        return self.state.interview_mode

    @listen("RAG")
    async def execute_rag(self):
        """Generate a context-grounded answer and store it as rag_output."""
        try:
            inputs = self.state.input.model_dump()
            crew_output = await self.rag_crew.kickoff_async(inputs=inputs)
            self.specialist_executor.log_tuning("Advanced RAG Crew Output", crew_output.model_dump())
            output_pydantic = crew_output.pydantic
            if not output_pydantic:
                raw_json = json.loads(crew_output.raw)
                output_pydantic = RAGOutput.model_validate(raw_json)
            self.state.rag_output = output_pydantic
            return output_pydantic
        except Exception as e:
            current_app.logger.error(f"CREW rag_crew Error: {str(e)}")
            self.exception_raised = True
            raise e

    @listen("CHECK")
    async def check_affirmative_answer(self):
        """Decide whether the candidate's latest reply was affirmative."""
        try:
            inputs = self.state.input.model_dump()
            crew_output = await self.affirmative_answer_crew.kickoff_async(inputs=inputs)
            self.specialist_executor.log_tuning("Traicie Check Affirmative Answer Crew Output", crew_output.model_dump())
            output_pydantic = crew_output.pydantic
            if not output_pydantic:
                raw_json = json.loads(crew_output.raw)
                output_pydantic = RAGOutput.model_validate(raw_json)
            self.state.affirmative_answer = output_pydantic.affirmative
            return output_pydantic
        except Exception as e:
            # BUGFIX: log message previously said "CREW rag_crew Error" — this
            # handler runs the affirmative-answer crew, not the RAG crew.
            current_app.logger.error(f"CREW affirmative_answer_crew Error: {str(e)}")
            self.exception_raised = True
            raise e

    async def kickoff_async(self, inputs=None):
        """Validate `inputs` into the flow state, run the flow and return the state."""
        self.state.input = SelectionInput.model_validate(inputs)
        result = await super().kickoff_async(inputs)
        self.specialist_executor.log_tuning("Specialist Executor Output", self.state.model_dump())
        return self.state

View File

@@ -36,14 +36,16 @@
] ]
}, },
"scripts": { "scripts": {
"prebuild": "mkdir -p static/dist && cp -r ../eveai_app/static/assets static/", "prebuild": "mkdir -p static/dist && npm run sync-assets",
"sync-assets": "rsync -av ../eveai_app/static/assets/ static/assets/ && rsync -av ../eveai_chat_client/static/assets/ static/assets/",
"build": "npm run prebuild && npm run build:main && npm run build:chat", "build": "npm run prebuild && npm run build:main && npm run build:chat",
"build:main": "parcel build frontend_src/js/main.js --dist-dir static/dist --public-url /static/dist/ --no-source-maps", "build:main": "parcel build frontend_src/js/main.js --dist-dir static/dist --public-url /static/dist/ --no-source-maps",
"build:chat": "parcel build frontend_src/js/chat-client.js --dist-dir static/dist --public-url /static/dist/ --no-source-maps", "build:chat": "parcel build frontend_src/js/chat-client.js --dist-dir static/dist --public-url /static/dist/ --no-source-maps",
"predev": "mkdir -p static/dist && cp -r ../eveai_app/static/assets static/", "predev": "mkdir -p static/dist && npm run sync-assets",
"dev": "npm run predev && parcel frontend_src/js/main.js --dist-dir static/dist --public-url /static/dist/ & parcel frontend_src/js/chat-client.js --dist-dir static/dist --public-url /static/dist/", "dev": "npm run predev && parcel frontend_src/js/main.js --dist-dir static/dist --public-url /static/dist/ & parcel frontend_src/js/chat-client.js --dist-dir static/dist --public-url /static/dist/",
"prewatch": "mkdir -p static/dist && cp -r ../eveai_app/static/assets static/", "prewatch": "mkdir -p static/dist && npm run sync-assets",
"watch": "npm run prewatch && parcel watch frontend_src/js/main.js --dist-dir static/dist --public-url /static/dist/ & parcel watch frontend_src/js/chat-client.js --dist-dir static/dist --public-url /static/dist/", "watch": "npm run prewatch && parcel watch frontend_src/js/main.js --dist-dir static/dist --public-url /static/dist/ & parcel watch frontend_src/js/chat-client.js --dist-dir static/dist --public-url /static/dist/",
"clean": "rm -rf static/dist/* static/assets .parcel-cache" "clean": "rm -rf static/dist/* static/assets .parcel-cache"
} }
} }

View File

@@ -82,7 +82,7 @@ typing_extensions~=4.12.2
babel~=2.16.0 babel~=2.16.0
dogpile.cache~=1.3.3 dogpile.cache~=1.3.3
python-docx~=1.1.2 python-docx~=1.1.2
crewai~=0.140.0 crewai~=0.152.0
sseclient~=0.0.27 sseclient~=0.0.27
termcolor~=2.5.0 termcolor~=2.5.0
mistral-common~=1.5.5 mistral-common~=1.5.5