Started to work on interaction views. However, a quick check-in is needed because of a system-wide Python upgrade that breaks code.

This commit is contained in:
Josako
2024-06-21 09:52:06 +02:00
parent c5370c8026
commit cc9f6c95aa
19 changed files with 553 additions and 112 deletions

3
.idea/eveAI.iml generated
View File

@@ -6,8 +6,9 @@
<component name="NewModuleRootManager"> <component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$"> <content url="file://$MODULE_DIR$">
<excludeFolder url="file://$MODULE_DIR$/.venv" /> <excludeFolder url="file://$MODULE_DIR$/.venv" />
<excludeFolder url="file://$MODULE_DIR$/.venv2" />
</content> </content>
<orderEntry type="inheritedJdk" /> <orderEntry type="jdk" jdkName="Python 3.12 (eveAI)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" /> <orderEntry type="sourceFolder" forTests="false" />
</component> </component>
<component name="TemplatesService"> <component name="TemplatesService">

View File

@@ -9,6 +9,7 @@ class ChatSession(db.Model):
session_id = db.Column(db.String(36), nullable=True) session_id = db.Column(db.String(36), nullable=True)
session_start = db.Column(db.DateTime, nullable=False) session_start = db.Column(db.DateTime, nullable=False)
session_end = db.Column(db.DateTime, nullable=True) session_end = db.Column(db.DateTime, nullable=True)
timezone = db.Column(db.String(30), nullable=True)
# Relations # Relations
interactions = db.relationship('Interaction', backref='chat_session', lazy=True) interactions = db.relationship('Interaction', backref='chat_session', lazy=True)
@@ -25,6 +26,7 @@ class Interaction(db.Model):
answer = db.Column(db.Text, nullable=True) answer = db.Column(db.Text, nullable=True)
algorithm_used = db.Column(db.String(20), nullable=True) algorithm_used = db.Column(db.String(20), nullable=True)
language = db.Column(db.String(2), nullable=False) language = db.Column(db.String(2), nullable=False)
timezone = db.Column(db.String(30), nullable=True)
appreciation = db.Column(db.Integer, nullable=True) appreciation = db.Column(db.Integer, nullable=True)
# Timing information # Timing information

View File

@@ -21,6 +21,10 @@ class CitedAnswer(BaseModel):
..., ...,
description="The integer IDs of the SPECIFIC sources that were used to generate the answer" description="The integer IDs of the SPECIFIC sources that were used to generate the answer"
) )
insufficient_info: bool = Field(
False, # Default value is set to False
description="A boolean indicating whether given sources were sufficient or not to generate the answer"
)
def set_language_prompt_template(cls, language_prompt): def set_language_prompt_template(cls, language_prompt):
@@ -112,17 +116,20 @@ def select_model_variables(tenant):
summary_template = current_app.config.get('GPT4_SUMMARY_TEMPLATE') summary_template = current_app.config.get('GPT4_SUMMARY_TEMPLATE')
rag_template = current_app.config.get('GPT4_RAG_TEMPLATE') rag_template = current_app.config.get('GPT4_RAG_TEMPLATE')
history_template = current_app.config.get('GPT4_HISTORY_TEMPLATE') history_template = current_app.config.get('GPT4_HISTORY_TEMPLATE')
encyclopedia_template = current_app.config.get('GPT4_ENCYCLOPEDIA_TEMPLATE')
tool_calling_supported = True tool_calling_supported = True
case 'gpt-3-5-turbo': case 'gpt-3-5-turbo':
summary_template = current_app.config.get('GPT3_5_SUMMARY_TEMPLATE') summary_template = current_app.config.get('GPT3_5_SUMMARY_TEMPLATE')
rag_template = current_app.config.get('GPT3_5_RAG_TEMPLATE') rag_template = current_app.config.get('GPT3_5_RAG_TEMPLATE')
history_template = current_app.config.get('GPT3_5_HISTORY_TEMPLATE') history_template = current_app.config.get('GPT3_5_HISTORY_TEMPLATE')
encyclopedia_template = current_app.config.get('GPT3_5_ENCYCLOPEDIA_TEMPLATE')
case _: case _:
raise Exception(f'Error setting model variables for tenant {tenant.id} ' raise Exception(f'Error setting model variables for tenant {tenant.id} '
f'error: Invalid chat model') f'error: Invalid chat model')
model_variables['summary_template'] = summary_template model_variables['summary_template'] = summary_template
model_variables['rag_template'] = rag_template model_variables['rag_template'] = rag_template
model_variables['history_template'] = history_template model_variables['history_template'] = history_template
model_variables['encyclopedia_template'] = encyclopedia_template
if tool_calling_supported: if tool_calling_supported:
model_variables['cited_answer_cls'] = CitedAnswer model_variables['cited_answer_cls'] = CitedAnswer
case _: case _:

View File

@@ -0,0 +1,37 @@
# common/utils/filters.py
import pytz
from datetime import datetime
def to_local_time(utc_dt, timezone_str):
    """
    Convert a naive UTC datetime to a formatted local-time string.

    Args:
        utc_dt: A naive ``datetime`` assumed to represent UTC. Falsy values
            (``None``) yield ``"N/A"`` so templates render gracefully for
            sessions that never started.
        timezone_str: An IANA timezone name (e.g. ``"Europe/Brussels"``).
            The DB columns feeding this filter are nullable, so ``None``,
            an empty string, or an unknown name falls back to UTC instead
            of raising.

    Returns:
        str: The local time formatted as ``'%Y-%m-%d %H:%M:%S %Z%z'``,
        or ``"N/A"`` when no datetime was supplied.
    """
    # stdlib replacement for pytz (available since Python 3.9); imported
    # locally so the module keeps working even if pytz is removed later.
    from zoneinfo import ZoneInfo, ZoneInfoNotFoundError

    if not utc_dt:
        return "N/A"
    try:
        # Fall back to UTC for NULL/empty timezone columns rather than
        # crashing the whole page render.
        local_tz = ZoneInfo(timezone_str) if timezone_str else ZoneInfo("UTC")
    except (ZoneInfoNotFoundError, ValueError):
        local_tz = ZoneInfo("UTC")
    local_dt = utc_dt.replace(tzinfo=ZoneInfo("UTC")).astimezone(local_tz)
    return local_dt.strftime('%Y-%m-%d %H:%M:%S %Z%z')
def time_difference(start_dt, end_dt):
    """
    Render the elapsed time between two datetimes as a string.

    Returns "N/A" when no start time is available, "Ongoing" when the
    interval has not ended yet, otherwise ``str(end_dt - start_dt)``.
    """
    if not start_dt:
        return "N/A"
    if not end_dt:
        return "Ongoing"
    # str(timedelta) gives e.g. "0:05:30"; adjust here if another
    # presentation is wanted.
    return str(end_dt - start_dt)
def register_filters(app):
    """
    Attach this module's custom Jinja filters to the given Flask app.
    """
    filters = {
        'to_local_time': to_local_time,
        'time_difference': time_difference,
    }
    for name, func in filters.items():
        app.jinja_env.filters[name] = func

View File

@@ -100,19 +100,28 @@ class Config(object):
{tenant_context} {tenant_context}
The context is a conversation history, with the HUMAN asking questions, the AI answering questions. The context is a conversation history, with the HUMAN asking questions, the AI answering questions.
The history is delimited between triple backquotes. The history is delimited between triple backquotes.
Your answer by stating the question in {language}. You answer by stating the question in {language}.
History: History:
```{history}``` ```{history}```
Question to be detailed: Question to be detailed:
{question}""" {question}"""
# Fallback Algorithms GPT3_5_HISTORY_TEMPLATE = """You are a helpful assistant that details a question based on a previous context,
FALLBACK_ALGORITHMS = [ in such a way that the question is understandable without the previous context.
"RAG_TENANT", {tenant_context}
"RAG_WIKIPEDIA", The context is a conversation history, with the HUMAN asking questions, the AI answering questions.
"RAG_GOOGLE", The history is delimited between triple backquotes.
"LLM" You answer by stating the question in {language}.
] History:
```{history}```
Question to be detailed:
{question}"""
GPT4_ENCYCLOPEDIA_TEMPLATE = """You have a lot of background knowledge, and as such you are some kind of
'encyclopedia' to explain general terminology. Only answer if you have a clear understanding of the question.
If not, say you do not have sufficient information to answer the question. Use the {language} in your communication.
Question:
{question}"""
# SocketIO settings # SocketIO settings
# SOCKETIO_ASYNC_MODE = 'threading' # SOCKETIO_ASYNC_MODE = 'threading'
@@ -125,6 +134,14 @@ class Config(object):
PERMANENT_SESSION_LIFETIME = timedelta(minutes=60) PERMANENT_SESSION_LIFETIME = timedelta(minutes=60)
SESSION_REFRESH_EACH_REQUEST = True SESSION_REFRESH_EACH_REQUEST = True
# Fallback Algorithms
FALLBACK_ALGORITHMS = [
"RAG_TENANT",
"RAG_WIKIPEDIA",
"RAG_GOOGLE",
"LLM"
]
# Interaction algorithms # Interaction algorithms
INTERACTION_ALGORITHMS = { INTERACTION_ALGORITHMS = {
"RAG_TENANT": {"name": "RAG_TENANT", "description": "Algorithm using only information provided by the tenant"}, "RAG_TENANT": {"name": "RAG_TENANT", "description": "Algorithm using only information provided by the tenant"},

View File

@@ -13,8 +13,7 @@ from config.logging_config import LOGGING
from common.utils.security import set_tenant_session_data from common.utils.security import set_tenant_session_data
from .errors import register_error_handlers from .errors import register_error_handlers
from common.utils.celery_utils import make_celery, init_celery from common.utils.celery_utils import make_celery, init_celery
from common.utils.debug_utils import log_request_middleware from common.utils.template_filters import register_filters
from common.utils.nginx_utils import prefixed_url_for
def create_app(config_file=None): def create_app(config_file=None):
@@ -85,6 +84,9 @@ def create_app(config_file=None):
# Register API # Register API
register_api(app) register_api(app)
# Register template filters
register_filters(app)
app.logger.info("EveAI App Server Started Successfully") app.logger.info("EveAI App Server Started Successfully")
app.logger.info("-------------------------------------------------------------------------------------------------") app.logger.info("-------------------------------------------------------------------------------------------------")
return app return app
@@ -112,6 +114,8 @@ def register_blueprints(app):
app.register_blueprint(document_bp) app.register_blueprint(document_bp)
from .views.security_views import security_bp from .views.security_views import security_bp
app.register_blueprint(security_bp) app.register_blueprint(security_bp)
from .views.interaction_views import interaction_bp
app.register_blueprint(interaction_bp)
def register_api(app): def register_api(app):

View File

@@ -0,0 +1,23 @@
{# Chat-session list page: paginated, selectable table of a tenant's chat
   sessions. Posting the form sends the selected row to
   interaction_bp.handle_chat_session_selection for dispatch. #}
{% extends 'base.html' %}

{% from 'macros.html' import render_selectable_table, render_pagination %}

{% block title %}Chat Sessions{% endblock %}
{% block content_title %}Chat Sessions{% endblock %}
{% block content_description %}View Chat Sessions for Tenant{% endblock %}
{% block content_class %}<div class="col-xl-12 col-lg-5 col-md-7 mx-auto"></div>{% endblock %}

{% block content %}
    <div class="container">
        <form method="POST" action="{{ url_for('interaction_bp.handle_chat_session_selection') }}">
            {# NOTE(review): table id "documentsTable" looks copied from the
               document views — confirm whether the selection JS keys on it. #}
            {{ render_selectable_table(headers=["ID", "Session ID", "Session Start", "Session End"], rows=rows, selectable=True, id="documentsTable") }}
            <div class="form-group mt-3">
                <button type="submit" name="action" value="view_chat_session" class="btn btn-primary">View Chat Session</button>
            </div>
        </form>
    </div>
{% endblock %}

{% block content_footer %}
    {{ render_pagination(pagination, 'interaction_bp.chat_sessions') }}
{% endblock %}

View File

@@ -0,0 +1,127 @@
{% extends "base.html" %}
{% block content %}
<div class="container mt-5">
<h2>Chat Session Details</h2>
<!-- Session Information -->
<div class="card mb-4">
<div class="card-header">
<h5>Session Information</h5>
<!-- Timezone Toggle Buttons -->
<div class="btn-group" role="group">
<button type="button" class="btn btn-primary" id="toggle-interaction-timezone">Interaction Timezone</button>
<button type="button" class="btn btn-secondary" id="toggle-admin-timezone">Admin Timezone</button>
</div>
</div>
<div class="card-body">
<dl class="row">
<dt class="col-sm-3">Session ID:</dt>
<dd class="col-sm-9">{{ chat_session.session_id }}</dd>
<dt class="col-sm-3">Session Start:</dt>
<dd class="col-sm-9">
<span class="timezone interaction-timezone">{{ chat_session.session_start | to_local_time(chat_session.timezone) }}</span>
<span class="timezone admin-timezone d-none">{{ chat_session.session_start | to_local_time(session['admin_user_timezone']) }}</span>
</dd>
<dt class="col-sm-3">Session End:</dt>
<dd class="col-sm-9">
{% if chat_session.session_end %}
<span class="timezone interaction-timezone">{{ chat_session.session_end | to_local_time(chat_session.timezone) }}</span>
<span class="timezone admin-timezone d-none">{{ chat_session.session_end | to_local_time(session['admin_user_timezone']) }}</span>
{% else %}
Ongoing
{% endif %}
</dd>
</dl>
</div>
</div>
<!-- Interactions List -->
<div class="card mb-4">
<div class="card-header">
<h5>Interactions</h5>
</div>
<div class="card-body">
{% for interaction in interactions %}
<div class="interaction mb-3">
<div class="card">
<div class="card-header d-flex justify-content-between">
<span>Question:</span>
<span class="text-muted">
<span class="timezone interaction-timezone">{{ interaction.question_at | to_local_time(interaction.timezone) }}</span>
<span class="timezone admin-timezone d-none">{{ interaction.question_at | to_local_time(session['admin_user_timezone']) }}</span>
-
<span class="timezone interaction-timezone">{{ interaction.answer_at | to_local_time(interaction.timezone) }}</span>
<span class="timezone admin-timezone d-none">{{ interaction.answer_at | to_local_time(session['admin_user_timezone']) }}</span>
({{ interaction.question_at | time_difference(interaction.answer_at) }})
</span>
</div>
<div class="card-body">
<p><strong>Question:</strong> {{ interaction.question }}</p>
<p><strong>Answer:</strong> {{ interaction.answer }}</p>
<p>
<strong>Algorithm Used:</strong>
<i class="material-icons {{ 'fingerprint-rag-' ~ interaction.algorithm_used.lower() }}">
fingerprint
</i> {{ interaction.algorithm_used }}
</p>
<p>
<strong>Appreciation:</strong>
<i class="material-icons thumb-icon {{ 'thumb_up' if interaction.appreciation == 1 else 'thumb_down' }}">
{{ 'thumb_up' if interaction.appreciation == 1 else 'thumb_down' }}
</i>
</p>
<p><strong>Embeddings:</strong>
{% if interaction.embeddings %}
{% for embedding in interaction.embeddings %}
<a href="{{ url_for('interaction_bp.view_embedding', embedding_id=embedding.embedding_id) }}" class="badge badge-info">
{{ embedding.embedding_id }}
</a>
{% endfor %}
{% else %}
None
{% endif %}
</p>
</div>
</div>
</div>
{% endfor %}
</div>
</div>
</div>
{% endblock %}
{% block scripts %}
<script>
document.addEventListener('DOMContentLoaded', function() {
// Elements to toggle
const interactionTimes = document.querySelectorAll('.interaction-timezone');
const adminTimes = document.querySelectorAll('.admin-timezone');
// Buttons
const interactionButton = document.getElementById('toggle-interaction-timezone');
const adminButton = document.getElementById('toggle-admin-timezone');
// Toggle to Interaction Timezone
interactionButton.addEventListener('click', function() {
interactionTimes.forEach(el => el.classList.remove('d-none'));
adminTimes.forEach(el => el.classList.add('d-none'));
interactionButton.classList.add('btn-primary');
interactionButton.classList.remove('btn-secondary');
adminButton.classList.add('btn-secondary');
adminButton.classList.remove('btn-primary');
});
// Toggle to Admin Timezone
adminButton.addEventListener('click', function() {
interactionTimes.forEach(el => el.classList.add('d-none'));
adminTimes.forEach(el => el.classList.remove('d-none'));
interactionButton.classList.add('btn-secondary');
interactionButton.classList.remove('btn-primary');
adminButton.classList.add('btn-primary');
adminButton.classList.remove('btn-secondary');
});
});
</script>
{% endblock %}

View File

@@ -79,7 +79,7 @@
{'name': 'User Registration', 'url': '/user/user', 'roles': ['Super User', 'Tenant Admin']}, {'name': 'User Registration', 'url': '/user/user', 'roles': ['Super User', 'Tenant Admin']},
]) }} ]) }}
{% endif %} {% endif %}
{% if current_user.is_authenticated %} {% if current_user.is_authenticated %}
{{ dropdown('Document Mgmt', 'contacts', [ {{ dropdown('Document Mgmt', 'contacts', [
{'name': 'Add Document', 'url': '/document/add_document', 'roles': ['Super User', 'Tenant Admin']}, {'name': 'Add Document', 'url': '/document/add_document', 'roles': ['Super User', 'Tenant Admin']},
{'name': 'Add URL', 'url': '/document/add_url', 'roles': ['Super User', 'Tenant Admin']}, {'name': 'Add URL', 'url': '/document/add_url', 'roles': ['Super User', 'Tenant Admin']},
@@ -87,6 +87,11 @@
{'name': 'Library Operations', 'url': '/document/library_operations', 'roles': ['Super User', 'Tenant Admin']}, {'name': 'Library Operations', 'url': '/document/library_operations', 'roles': ['Super User', 'Tenant Admin']},
]) }} ]) }}
{% endif %} {% endif %}
{% if current_user.is_authenticated %}
{{ dropdown('Interactions', 'contacts', [
{'name': 'Chat Sessions', 'url': '/interaction/chat_sessions', 'roles': ['Super User', 'Tenant Admin']},
]) }}
{% endif %}
{% if current_user.is_authenticated %} {% if current_user.is_authenticated %}
{{ dropdown(current_user.user_name, 'contacts', [ {{ dropdown(current_user.user_name, 'contacts', [
{'name': 'Session Defaults', 'url': '/session_defaults', 'roles': ['Super User', 'Tenant Admin']}, {'name': 'Session Defaults', 'url': '/session_defaults', 'roles': ['Super User', 'Tenant Admin']},

View File

@@ -170,4 +170,26 @@
}); });
} }
</script> </script>
<script>
// JavaScript to detect user's timezone
document.addEventListener('DOMContentLoaded', (event) => {
// Detect timezone
const userTimezone = Intl.DateTimeFormat().resolvedOptions().timeZone;
// Send timezone to the server via a POST request
fetch('/set_user_timezone', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({ timezone: userTimezone })
}).then(response => {
if (response.ok) {
console.log('Timezone sent to server successfully');
} else {
console.error('Failed to send timezone to server');
}
});
});
</script>
{% endblock %} {% endblock %}

View File

@@ -1,4 +1,4 @@
from flask import request, redirect, url_for, flash, render_template, Blueprint, session, current_app from flask import request, render_template, Blueprint, session, current_app, jsonify
from flask_security import roles_required, roles_accepted from flask_security import roles_required, roles_accepted
from .basic_forms import SessionDefaultsForm from .basic_forms import SessionDefaultsForm
@@ -41,3 +41,16 @@ def session_defaults():
session['default_language'] = form.default_language.data session['default_language'] = form.default_language.data
return render_template('basic/session_defaults.html', form=form) return render_template('basic/session_defaults.html', form=form)
@basic_bp.route('/set_user_timezone', methods=['POST'])
def set_user_timezone():
    """
    Store the browser-detected IANA timezone name in the admin session.

    Called by the fetch() snippet in base.html on page load with a JSON
    body like ``{"timezone": "Europe/Brussels"}``.

    Returns:
        200 with the stored timezone on success; 400 when the body is
        missing, is not valid JSON, or carries no timezone value.
    """
    # silent=True: a malformed or absent JSON body yields None instead of
    # raising, so the client gets a clean 400 rather than a 500.
    data = request.get_json(silent=True) or {}
    timezone = data.get('timezone')
    if timezone:
        session['admin_user_timezone'] = timezone
        return jsonify({'status': 'success', 'timezone': timezone}), 200
    else:
        return jsonify({'status': 'error', 'message': 'Timezone not provided'}), 400

View File

@@ -0,0 +1,100 @@
import ast
import os
from datetime import datetime as dt, timezone as tz
import chardet
from flask import request, redirect, flash, render_template, Blueprint, session, current_app
from flask_security import roles_accepted, current_user
from sqlalchemy import desc
from sqlalchemy.orm import joinedload
from werkzeug.datastructures import FileStorage
from werkzeug.utils import secure_filename
from sqlalchemy.exc import SQLAlchemyError
import requests
from requests.exceptions import SSLError
from urllib.parse import urlparse
import io
from common.models.interaction import ChatSession, Interaction
from common.extensions import db
from .document_forms import AddDocumentForm, AddURLForm, EditDocumentForm, EditDocumentVersionForm
from common.utils.middleware import mw_before_request
from common.utils.celery_utils import current_celery
from common.utils.nginx_utils import prefixed_url_for
from common.utils.view_assistants import form_validation_failed, prepare_table_for_macro
interaction_bp = Blueprint('interaction_bp', __name__, url_prefix='/interaction')
@interaction_bp.before_request
def log_before_request():
    # Trace every request entering this blueprint (debug level only).
    current_app.logger.debug(f"Before request (interaction_bp): {request.method} {request.url}")
@interaction_bp.after_request
def log_after_request(response):
    # Log the outcome of each request, then pass the response through unchanged.
    current_app.logger.debug(
        f"After request (interaction_bp): {request.method} {request.url} - Status: {response.status}")
    return response
@interaction_bp.before_request
def before_request():
    # Run the shared middleware before every request on this blueprint —
    # presumably it switches the DB session to the tenant's schema (see the
    # error message below); confirm against common.utils.middleware.
    try:
        mw_before_request()
    except Exception as e:
        current_app.logger.error(f'Error switching schema in Interaction Blueprint: {e}')
        # Dump the current user's roles to help diagnose the failure,
        # then re-raise so the request fails loudly.
        for role in current_user.roles:
            current_app.logger.debug(f'User {current_user.email} has role {role.name}')
        raise
@interaction_bp.route('/chat_sessions', methods=['GET', 'POST'])
@roles_accepted('Super User', 'Tenant Admin')  # consistency: every other view in this blueprint requires these roles
def chat_sessions():
    """
    List the tenant's chat sessions, newest first, paginated.

    Query args:
        page: 1-based page number (default 1).
        per_page: rows per page (default 10).

    Renders interaction/chat_sessions.html with table rows and a
    pagination object.
    """
    page = request.args.get('page', 1, type=int)
    per_page = request.args.get('per_page', 10, type=int)

    query = ChatSession.query.order_by(desc(ChatSession.session_start))
    pagination = query.paginate(page=page, per_page=per_page, error_out=False)
    sessions = pagination.items

    rows = prepare_table_for_macro(
        sessions, [('id', ''), ('session_id', ''), ('session_start', ''), ('session_end', '')])

    return render_template('interaction/chat_sessions.html', rows=rows, pagination=pagination)
@interaction_bp.route('/handle_chat_session_selection', methods=['POST'])
@roles_accepted('Super User', 'Tenant Admin')
def handle_chat_session_selection():
    """
    Dispatch the action chosen on the chat-sessions table.

    The selected row arrives as a Python-literal dict string; its 'value'
    entry is the chat session's primary key.
    """
    selected = request.form['selected_row']
    cs_id = ast.literal_eval(selected).get('value')
    action = request.form['action']

    if action == 'view_chat_session':
        return redirect(prefixed_url_for('interaction_bp.view_chat_session', chat_session_id=cs_id))
    # Add more conditions for other actions

    # Unknown action: fall back to the list view.
    return redirect(prefixed_url_for('interaction_bp.chat_sessions'))
@interaction_bp.route('/view_chat_session/<chat_session_id>', methods=['GET'])
@roles_accepted('Super User', 'Tenant Admin')
def view_chat_session(chat_session_id):
    """
    Render the detail page for one chat session, looked up by primary key.

    Returns a 404 when the id does not exist.
    """
    chat_session = ChatSession.query.get_or_404(chat_session_id)
    # Bug fix: the rendered template must be returned — without the return
    # the view yields None and Flask raises a 500.
    return show_chat_session(chat_session)
@interaction_bp.route('/view_chat_session_by_session_id/<session_id>', methods=['GET'])
@roles_accepted('Super User', 'Tenant Admin')
def view_chat_session_by_session_id(session_id):
    """
    Render the detail page for a chat session identified by its external
    session_id (a 36-char string per the model) rather than the primary key.

    Returns a 404 when no session matches.
    """
    chat_session = ChatSession.query.filter_by(session_id=session_id).first_or_404()
    # Bug fix: propagate the rendered template back to Flask — previously
    # the result was discarded and the view returned None.
    return show_chat_session(chat_session)
def show_chat_session(chat_session):
    """
    Shared renderer for the chat-session detail views.

    Loads all interactions belonging to *chat_session* and renders the
    detail template with both objects in context.
    """
    interactions = Interaction.query.filter_by(chat_session_id=chat_session.id).all()
    return render_template('interaction/view_chat_session.html', chat_session=chat_session, interactions=interactions)

View File

@@ -60,7 +60,8 @@ class TenantForm(FlaskForm):
self.embedding_model.choices = [(model, model) for model in current_app.config['SUPPORTED_EMBEDDINGS']] self.embedding_model.choices = [(model, model) for model in current_app.config['SUPPORTED_EMBEDDINGS']]
self.llm_model.choices = [(model, model) for model in current_app.config['SUPPORTED_LLMS']] self.llm_model.choices = [(model, model) for model in current_app.config['SUPPORTED_LLMS']]
# Initialize fallback algorithms # Initialize fallback algorithms
self.fallback_algorithms.choices = [(algorithm, algorithm.lower()) for algorithm in current_app.config['FALLBACK_ALGORITHMS']] self.fallback_algorithms.choices = \
[(algorithm, algorithm.lower()) for algorithm in current_app.config['FALLBACK_ALGORITHMS']]
class BaseUserForm(FlaskForm): class BaseUserForm(FlaskForm):

View File

@@ -67,6 +67,7 @@ def handle_message(data):
data['message'], data['message'],
data['language'], data['language'],
session['session_id'], session['session_id'],
data['timezone']
]) ])
current_app.logger.debug(f'SocketIO: Message offloading for tenant {current_tenant_id}, ' current_app.logger.debug(f'SocketIO: Message offloading for tenant {current_tenant_id}, '
f'Question: {task.id}') f'Question: {task.id}')

View File

@@ -18,7 +18,7 @@ from langchain_core.exceptions import LangChainException
from common.utils.database import Database from common.utils.database import Database
from common.models.document import DocumentVersion, EmbeddingMistral, EmbeddingSmallOpenAI, Embedding from common.models.document import DocumentVersion, EmbeddingMistral, EmbeddingSmallOpenAI, Embedding
from common.models.user import Tenant from common.models.user import Tenant
from common.models.interaction import ChatSession, Interaction, InteractionEmbedding from common.models.interaction import ChatSession, Interaction, InteractionEmbedding
from common.extensions import db from common.extensions import db
from common.utils.celery_utils import current_celery from common.utils.celery_utils import current_celery
from common.utils.model_utils import select_model_variables, create_language_template, replace_variable_in_template from common.utils.model_utils import select_model_variables, create_language_template, replace_variable_in_template
@@ -33,12 +33,11 @@ def detail_question(question, language, model_variables, session_id):
language_template = create_language_template(template, language) language_template = create_language_template(template, language)
full_template = replace_variable_in_template(language_template, "{tenant_context}", model_variables['rag_context']) full_template = replace_variable_in_template(language_template, "{tenant_context}", model_variables['rag_context'])
history_prompt = ChatPromptTemplate.from_template(full_template) history_prompt = ChatPromptTemplate.from_template(full_template)
setup_and_retrieval = RunnableParallel({"history": retriever,"question": RunnablePassthrough()}) setup_and_retrieval = RunnableParallel({"history": retriever, "question": RunnablePassthrough()})
output_parser = StrOutputParser() output_parser = StrOutputParser()
chain = setup_and_retrieval | history_prompt | llm | output_parser chain = setup_and_retrieval | history_prompt | llm | output_parser
try: try:
answer = chain.invoke(question) answer = chain.invoke(question)
return answer return answer
@@ -48,7 +47,7 @@ def detail_question(question, language, model_variables, session_id):
@current_celery.task(name='ask_question', queue='llm_interactions') @current_celery.task(name='ask_question', queue='llm_interactions')
def ask_question(tenant_id, question, language, session_id): def ask_question(tenant_id, question, language, session_id, user_timezone):
"""returns result structured as follows: """returns result structured as follows:
result = { result = {
'answer': 'Your answer here', 'answer': 'Your answer here',
@@ -75,103 +74,178 @@ def ask_question(tenant_id, question, language, session_id):
chat_session = ChatSession() chat_session = ChatSession()
chat_session.session_id = session_id chat_session.session_id = session_id
chat_session.session_start = dt.now(tz.utc) chat_session.session_start = dt.now(tz.utc)
chat_session.timezone = user_timezone
db.session.add(chat_session) db.session.add(chat_session)
db.session.commit() db.session.commit()
except SQLAlchemyError as e: except SQLAlchemyError as e:
current_app.logger.error(f'ask_question: Error initializing chat session in database: {e}') current_app.logger.error(f'ask_question: Error initializing chat session in database: {e}')
raise raise
new_interaction = Interaction() result, interaction = answer_using_tenant_rag(question, language, tenant, chat_session)
new_interaction.question = question
new_interaction.language = language
new_interaction.appreciation = None
new_interaction.chat_session_id = chat_session.id
new_interaction.question_at = dt.now(tz.utc)
new_interaction.algorithm_used = current_app.config['INTERACTION_ALGORITHMS']['RAG_TENANT']['name']
# Select variables to work with depending on tenant model
model_variables = select_model_variables(tenant)
tenant_info = tenant.to_dict()
# Langchain debugging if required
# set_debug(True)
detailed_question = detail_question(question, language, model_variables, session_id)
current_app.logger.debug(f'Original question:\n {question}\n\nDetailed question: {detailed_question}')
new_interaction.detailed_question = detailed_question
new_interaction.detailed_question_at = dt.now(tz.utc)
retriever = EveAIRetriever(model_variables, tenant_info)
llm = model_variables['llm']
template = model_variables['rag_template']
language_template = create_language_template(template, language)
full_template = replace_variable_in_template(language_template, "{tenant_context}", model_variables['rag_context'])
rag_prompt = ChatPromptTemplate.from_template(full_template)
setup_and_retrieval = RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
new_interaction_embeddings = []
if not model_variables['cited_answer_cls']: # The model doesn't support structured feedback
output_parser = StrOutputParser()
chain = setup_and_retrieval | rag_prompt | llm | output_parser
# Invoke the chain with the actual question
answer = chain.invoke(detailed_question)
new_interaction.answer = answer
result = {
'answer': answer,
'citations': []
}
else: # The model supports structured feedback
structured_llm = llm.with_structured_output(model_variables['cited_answer_cls'])
chain = setup_and_retrieval | rag_prompt | structured_llm
result = chain.invoke(detailed_question).dict()
current_app.logger.debug(f'ask_question: result answer: {result['answer']}')
current_app.logger.debug(f'ask_question: result citations: {result["citations"]}')
new_interaction.answer = result['answer']
# Filter out the existing Embedding IDs
given_embedding_ids = [int(emb_id) for emb_id in result['citations']]
embeddings = (
db.session.query(Embedding)
.filter(Embedding.id.in_(given_embedding_ids))
.all()
)
existing_embedding_ids = [emb.id for emb in embeddings]
urls = [emb.document_version.url for emb in embeddings]
for emb_id in existing_embedding_ids:
new_interaction_embedding = InteractionEmbedding(embedding_id=emb_id)
new_interaction_embedding.interaction = new_interaction
new_interaction_embeddings.append(new_interaction_embedding)
result['citations'] = urls
new_interaction.answer_at = dt.now(tz.utc)
chat_session.session_end = dt.now(tz.utc)
try:
db.session.add(chat_session)
db.session.add(new_interaction)
db.session.add_all(new_interaction_embeddings)
db.session.commit()
except SQLAlchemyError as e:
current_app.logger.error(f'ask_question: Error saving interaction to database: {e}')
raise
# Disable langchain debugging if set above.
# set_debug(False)
result['algorithm'] = current_app.config['INTERACTION_ALGORITHMS']['RAG_TENANT']['name'] result['algorithm'] = current_app.config['INTERACTION_ALGORITHMS']['RAG_TENANT']['name']
result['interaction_id'] = new_interaction.id result['interaction_id'] = interaction.id
if result['insufficient_info']:
if 'LLM' in tenant.fallback_algorithms:
result, interaction = answer_using_llm(question, language, tenant, chat_session)
result['algorithm'] = current_app.config['INTERACTION_ALGORITHMS']['LLM']['name']
result['interaction_id'] = interaction.id
return result return result
except Exception as e: except Exception as e:
current_app.logger.error(f'ask_question: Error processing question: {e}') current_app.logger.error(f'ask_question: Error processing question: {e}')
raise raise
def answer_using_tenant_rag(question, language, tenant, chat_session):
    """Answer *question* via RAG over the tenant's own embedded documents.

    Builds a LangChain pipeline (retriever -> prompt -> LLM), persists the
    interaction (question, detailed question, answer, cited embeddings) and
    stamps the chat session's end time.

    Args:
        question: Raw user question text.
        language: Two-letter language code the answer should be written in.
        tenant: Tenant model instance; drives model/template selection.
        chat_session: Active ChatSession this interaction belongs to.

    Returns:
        tuple[dict, Interaction]: result dict with keys 'answer',
        'citations' (resolved to source URLs) and 'insufficient_info',
        plus the persisted Interaction row.

    Raises:
        SQLAlchemyError: if persisting the interaction fails (session is
        rolled back first).
    """
    new_interaction = Interaction()
    new_interaction.question = question
    new_interaction.language = language
    new_interaction.timezone = chat_session.timezone
    new_interaction.appreciation = None
    new_interaction.chat_session_id = chat_session.id
    new_interaction.question_at = dt.now(tz.utc)
    new_interaction.algorithm_used = current_app.config['INTERACTION_ALGORITHMS']['RAG_TENANT']['name']

    # Select variables to work with depending on tenant model
    model_variables = select_model_variables(tenant)
    tenant_info = tenant.to_dict()

    # Langchain debugging if required
    # set_debug(True)

    # Expand the question with conversational context before retrieval.
    detailed_question = detail_question(question, language, model_variables, chat_session.session_id)
    current_app.logger.debug(f'Original question:\n {question}\n\nDetailed question: {detailed_question}')
    new_interaction.detailed_question = detailed_question
    new_interaction.detailed_question_at = dt.now(tz.utc)

    retriever = EveAIRetriever(model_variables, tenant_info)
    llm = model_variables['llm']
    template = model_variables['rag_template']
    language_template = create_language_template(template, language)
    full_template = replace_variable_in_template(language_template, "{tenant_context}", model_variables['rag_context'])
    rag_prompt = ChatPromptTemplate.from_template(full_template)
    setup_and_retrieval = RunnableParallel({"context": retriever, "question": RunnablePassthrough()})

    new_interaction_embeddings = []
    if not model_variables['cited_answer_cls']:  # The model doesn't support structured feedback
        output_parser = StrOutputParser()
        chain = setup_and_retrieval | rag_prompt | llm | output_parser
        # Invoke the chain with the actual question
        answer = chain.invoke(detailed_question)
        new_interaction.answer = answer
        result = {
            'answer': answer,
            'citations': [],
            'insufficient_info': False
        }
    else:  # The model supports structured feedback
        structured_llm = llm.with_structured_output(model_variables['cited_answer_cls'])
        chain = setup_and_retrieval | rag_prompt | structured_llm
        result = chain.invoke(detailed_question).dict()
        # Double quotes inside single-quoted f-strings: same-quote nesting
        # is a SyntaxError before Python 3.12 (PEP 701).
        current_app.logger.debug(f'answer_using_tenant_rag: result answer: {result["answer"]}')
        current_app.logger.debug(f'answer_using_tenant_rag: result citations: {result["citations"]}')
        current_app.logger.debug(f'answer_using_tenant_rag: insufficient information: {result["insufficient_info"]}')
        new_interaction.answer = result['answer']

        # Filter out the existing Embedding IDs: the model may cite IDs that
        # no longer exist, so only keep citations that resolve to rows.
        given_embedding_ids = [int(emb_id) for emb_id in result['citations']]
        embeddings = (
            db.session.query(Embedding)
            .filter(Embedding.id.in_(given_embedding_ids))
            .all()
        )
        existing_embedding_ids = [emb.id for emb in embeddings]
        urls = [emb.document_version.url for emb in embeddings]
        for emb_id in existing_embedding_ids:
            new_interaction_embedding = InteractionEmbedding(embedding_id=emb_id)
            new_interaction_embedding.interaction = new_interaction
            new_interaction_embeddings.append(new_interaction_embedding)
        # Expose citations to the caller as resolvable URLs, not raw IDs.
        result['citations'] = urls

    # Disable langchain debugging if set above.
    # set_debug(False)

    new_interaction.answer_at = dt.now(tz.utc)
    chat_session.session_end = dt.now(tz.utc)
    try:
        db.session.add(chat_session)
        db.session.add(new_interaction)
        db.session.add_all(new_interaction_embeddings)
        db.session.commit()
        return result, new_interaction
    except SQLAlchemyError as e:
        db.session.rollback()  # leave the session usable for the caller
        current_app.logger.error(f'answer_using_tenant_rag: Error saving interaction to database: {e}')
        raise
def answer_using_llm(question, language, tenant, chat_session):
    """Answer *question* directly with the LLM (no retrieval) as a fallback.

    Used when the tenant-RAG path reports insufficient information and the
    tenant allows an 'LLM' fallback. Persists the interaction and stamps the
    chat session's end time.

    Args:
        question: Raw user question text.
        language: Two-letter language code the answer should be written in.
        tenant: Tenant model instance; drives model/template selection.
        chat_session: Active ChatSession this interaction belongs to.

    Returns:
        tuple[dict, Interaction]: result dict with keys 'answer',
        'citations' (always empty for this algorithm) and
        'insufficient_info' (always False), plus the persisted Interaction.

    Raises:
        SQLAlchemyError: if persisting the interaction fails (session is
        rolled back first).
    """
    new_interaction = Interaction()
    new_interaction.question = question
    new_interaction.language = language
    new_interaction.timezone = chat_session.timezone
    new_interaction.appreciation = None
    new_interaction.chat_session_id = chat_session.id
    new_interaction.question_at = dt.now(tz.utc)
    new_interaction.algorithm_used = current_app.config['INTERACTION_ALGORITHMS']['LLM']['name']

    # Select variables to work with depending on tenant model
    model_variables = select_model_variables(tenant)

    # Langchain debugging if required
    # set_debug(True)

    # Expand the question with conversational context before answering.
    detailed_question = detail_question(question, language, model_variables, chat_session.session_id)
    current_app.logger.debug(f'Original question:\n {question}\n\nDetailed question: {detailed_question}')
    new_interaction.detailed_question = detailed_question
    new_interaction.detailed_question_at = dt.now(tz.utc)

    # No retriever here on purpose: this path answers from the LLM alone.
    # (Previously an EveAIRetriever was built and never used.)
    llm = model_variables['llm_no_rag']
    template = model_variables['encyclopedia_template']
    language_template = create_language_template(template, language)
    prompt = ChatPromptTemplate.from_template(language_template)
    chain = RunnablePassthrough() | prompt | llm | StrOutputParser()

    # Invoke the chain with the actual question
    answer = chain.invoke({"question": detailed_question})
    new_interaction.answer = answer
    result = {
        'answer': answer,
        'citations': [],
        'insufficient_info': False
    }

    # Disable langchain debugging if set above.
    # set_debug(False)

    new_interaction.answer_at = dt.now(tz.utc)
    chat_session.session_end = dt.now(tz.utc)
    try:
        db.session.add(chat_session)
        db.session.add(new_interaction)
        db.session.commit()
        return result, new_interaction
    except SQLAlchemyError as e:
        db.session.rollback()  # leave the session usable for the caller
        current_app.logger.error(f'answer_using_llm: Error saving interaction to database: {e}')
        raise
def tasks_ping():
    """Liveness probe for the tasks module; always answers 'pong'."""
    response = 'pong'
    return response

View File

@@ -19,7 +19,7 @@
'39', '39',
'EveAI-CHAT-6919-1265-9848-6655-9870', 'EveAI-CHAT-6919-1265-9848-6655-9870',
'http://macstudio.ask-eve-ai-local.com', 'http://macstudio.ask-eve-ai-local.com',
'en' 'nl'
); );
eveAI.initializeChat('chat-container'); eveAI.initializeChat('chat-container');
}); });

View File

@@ -10,7 +10,7 @@
--algorithm-color-rag-tenant: #0f0; /* Green for RAG_TENANT */ --algorithm-color-rag-tenant: #0f0; /* Green for RAG_TENANT */
--algorithm-color-rag-wikipedia: #00f; /* Blue for RAG_WIKIPEDIA */ --algorithm-color-rag-wikipedia: #00f; /* Blue for RAG_WIKIPEDIA */
--algorithm-color-rag-google: #ff0; /* Yellow for RAG_GOOGLE */ --algorithm-color-rag-google: #ff0; /* Yellow for RAG_GOOGLE */
--algorithm-color-rag-llm: #800080; /* Purple for RAG_LLM */ --algorithm-color-llm: #800080; /* Purple for RAG_LLM */
/*--font-family: 'Arial, sans-serif'; !* Default font family *!*/ /*--font-family: 'Arial, sans-serif'; !* Default font family *!*/
--font-family: 'ui-sans-serif, -apple-system, system-ui, Segoe UI, Roboto, Ubuntu, Cantarell, Noto Sans, sans-serif, Helvetica, Apple Color Emoji, Arial, Segoe UI Emoji, Segoe UI Symbol'; --font-family: 'ui-sans-serif, -apple-system, system-ui, Segoe UI, Roboto, Ubuntu, Cantarell, Noto Sans, sans-serif, Helvetica, Apple Color Emoji, Arial, Segoe UI Emoji, Segoe UI Symbol';
@@ -193,8 +193,8 @@
color: var(--algorithm-color-rag-google); color: var(--algorithm-color-rag-google);
} }
.fingerprint-rag-llm { .fingerprint-llm {
color: var(--algorithm-color-rag-llm); color: var(--algorithm-color-llm);
} }
/* Styling for citation links */ /* Styling for citation links */

View File

@@ -8,6 +8,7 @@ class EveAIChatWidget extends HTMLElement {
this.socket = null; // Initialize socket to null this.socket = null; // Initialize socket to null
this.attributesSet = false; // Flag to check if all attributes are set this.attributesSet = false; // Flag to check if all attributes are set
this.jwtToken = null; // Initialize jwtToken to null this.jwtToken = null; // Initialize jwtToken to null
this.userTimezone = Intl.DateTimeFormat().resolvedOptions().timeZone; // Detect user's timezone
console.log('EveAIChatWidget constructor called'); console.log('EveAIChatWidget constructor called');
} }
@@ -229,8 +230,8 @@ class EveAIChatWidget extends HTMLElement {
case 'RAG_GOOGLE': case 'RAG_GOOGLE':
algorithmClass = 'fingerprint-rag-google'; algorithmClass = 'fingerprint-rag-google';
break; break;
case 'RAG_LLM': case 'LLM':
algorithmClass = 'fingerprint-rag-llm'; algorithmClass = 'fingerprint-llm';
break; break;
default: default:
algorithmClass = ''; algorithmClass = '';
@@ -299,7 +300,13 @@ toggleFeedback(thumbsUp, thumbsDown, feedback, interactionId) {
return; return;
} }
console.log('Sending message to backend'); console.log('Sending message to backend');
this.socket.emit('user_message', { tenantId: this.tenantId, token: this.jwtToken, message, language: this.language }); this.socket.emit('user_message', {
tenantId: this.tenantId,
token: this.jwtToken,
message,
language: this.language,
timezone: this.userTimezone
});
this.setStatusMessage('Processing started ...') this.setStatusMessage('Processing started ...')
} }