from datetime import datetime as dt, timezone as tz

from flask import current_app
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableParallel
from sqlalchemy.exc import SQLAlchemyError
from celery import states
from celery.exceptions import Ignore
import os

# OpenAI imports
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import CharacterTextSplitter
from langchain_core.exceptions import LangChainException

from common.utils.database import Database
from common.models.document import DocumentVersion, EmbeddingMistral, EmbeddingSmallOpenAI
from common.models.user import Tenant
from common.extensions import db
from common.utils.celery_utils import current_celery
from common.utils.model_utils import select_model_variables
from common.langchain.EveAIRetriever import EveAIRetriever


@current_celery.task(name='ask_question', queue='llm_interactions')
def ask_question(tenant_id, question):
    current_app.logger.debug('In ask_question')
    current_app.logger.info(f'ask_question: Received question for tenant {tenant_id}: {question}. Processing...')

    try:
        # Retrieve the tenant
        tenant = Tenant.query.get(tenant_id)
        if not tenant:
            raise Exception(f'Tenant {tenant_id} not found.')

        # Ensure we are working in the correct database schema
        Database(tenant_id).switch_schema()

        # Select variables to work with depending on tenant model
        model_variables = select_model_variables(tenant)
        current_app.logger.debug(f'ask_question: model_variables: {model_variables}')

        retriever = EveAIRetriever(model_variables)

        # Search the database for relevant embeddings
        relevant_embeddings = retriever.invoke(question)

        return 'No response yet, check back later.'

    except Exception as e:
        current_app.logger.error(f'ask_question: Error processing question: {e}')
        raise


def tasks_ping():
    return 'pong'
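

# --- Illustrative sketch (not wired into ask_question above) ------------------
# ask_question currently stops after retrieval and returns a placeholder string.
# The sketch below shows one way the retrieved documents could be combined with
# the question and sent through a chat model using the ChatPromptTemplate,
# ChatOpenAI and StrOutputParser imports already present in this module.
# The prompt wording, the model name, and the assumption that EveAIRetriever
# returns LangChain Document objects (with a .page_content attribute) are
# illustrative assumptions, not the project's confirmed design.
def _draft_answer_sketch(relevant_embeddings, question):
    """Hypothetical helper: answer a question from retrieved context (sketch only)."""
    # Join the retrieved chunks into a single context string.
    context = '\n\n'.join(doc.page_content for doc in relevant_embeddings)

    prompt = ChatPromptTemplate.from_messages([
        ('system', 'Answer the question using only the context below.\n\n{context}'),
        ('human', '{question}'),
    ])
    llm = ChatOpenAI(model='gpt-4o-mini', temperature=0)  # model name is an assumption
    chain = prompt | llm | StrOutputParser()

    return chain.invoke({'context': context, 'question': question})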