Improving chat functionality significantly throughout the application.
@@ -1,12 +1,32 @@
 import langcodes
 from flask import current_app
-from langchain.embeddings import OpenAIEmbeddings
-from langchain.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_openai import ChatOpenAI
+from langchain_core.pydantic_v1 import BaseModel, Field
+from langchain.prompts import ChatPromptTemplate
+import ast
+from typing import List
 
 from common.models.document import EmbeddingSmallOpenAI
 
 
+class CitedAnswer(BaseModel):
+    """Default docstring - to be replaced with actual prompt"""
+
+    answer: str = Field(
+        ...,
+        description="The answer to the user question, based on the given sources",
+    )
+    citations: List[int] = Field(
+        ...,
+        description="The integer IDs of the SPECIFIC sources that were used to generate the answer"
+    )
+
+
+def set_language_prompt_template(cls, language_prompt):
+    cls.__doc__ = language_prompt
+
+
 def select_model_variables(tenant):
     embedding_provider = tenant.embedding_model.rsplit('.', 1)[0]
     embedding_model = tenant.embedding_model.rsplit('.', 1)[1]
@@ -60,7 +80,7 @@ def select_model_variables(tenant):
         case 'text-embedding-3-small':
             api_key = current_app.config.get('OPENAI_API_KEY')
             model_variables['embedding_model'] = OpenAIEmbeddings(api_key=api_key,
-                                                                  model='text-embedding-3-small')
+                                                                  model='text-embedding-3-small')
             model_variables['embedding_db_model'] = EmbeddingSmallOpenAI
             model_variables['min_chunk_size'] = current_app.config.get('OAI_TE3S_MIN_CHUNK_SIZE')
             model_variables['max_chunk_size'] = current_app.config.get('OAI_TE3S_MAX_CHUNK_SIZE')
@@ -78,20 +98,34 @@ def select_model_variables(tenant):
             model_variables['llm'] = ChatOpenAI(api_key=api_key,
                                                 model=llm_model,
                                                 temperature=model_variables['RAG_temperature'])
+            tool_calling_supported = False
             match llm_model:
                 case 'gpt-4-turbo' | 'gpt-4o':
                     summary_template = current_app.config.get('GPT4_SUMMARY_TEMPLATE')
                     rag_template = current_app.config.get('GPT4_RAG_TEMPLATE')
+                    tool_calling_supported = True
                 case 'gpt-3-5-turbo':
                     summary_template = current_app.config.get('GPT3_5_SUMMARY_TEMPLATE')
                     rag_template = current_app.config.get('GPT3_5_RAG_TEMPLATE')
                 case _:
                     raise Exception(f'Error setting model variables for tenant {tenant.id} '
                                     f'error: Invalid chat model')
             model_variables['summary_prompt'] = ChatPromptTemplate.from_template(summary_template)
             model_variables['rag_prompt'] = ChatPromptTemplate.from_template(rag_template)
+            model_variables['summary_template'] = summary_template
+            model_variables['rag_template'] = rag_template
+            if tool_calling_supported:
+                model_variables['cited_answer_cls'] = CitedAnswer
         case _:
             raise Exception(f'Error setting model variables for tenant {tenant.id} '
                             f'error: Invalid chat provider')
 
     return model_variables
+
+
+def create_language_template(template, language):
+    try:
+        full_language = langcodes.Language.make(language=language)
+        language_template = template.replace('{language}', full_language.display_name())
+    except ValueError:
+        language_template = template.replace('{language}', language)
+
+    return language_template
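The new CitedAnswer class is only exposed through model_variables['cited_answer_cls'] when the selected chat model supports tool calling. A minimal caller-side sketch of how it might be bound, assuming a tenant object, a question string, and a formatted_docs context string that are not part of this commit; with_structured_output is the langchain_openai mechanism that turns a Pydantic class into an OpenAI tool schema:

# Sketch only: `tenant`, `question`, and `formatted_docs` are assumed inputs.
model_variables = select_model_variables(tenant)

llm = model_variables['llm']
if 'cited_answer_cls' in model_variables:
    # Binds CitedAnswer as a tool, so the model returns a CitedAnswer
    # instance (answer plus integer source IDs) instead of free-form text.
    llm = llm.with_structured_output(model_variables['cited_answer_cls'])

# The prompt variable names here are assumptions about the configured RAG template.
chain = model_variables['rag_prompt'] | llm
result = chain.invoke({'question': question, 'context': formatted_docs})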
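The two new helpers are designed to work together: create_language_template fills a '{language}' placeholder with a human-readable language name via langcodes, falling back to the raw tag, and set_language_prompt_template installs the localized text as a class docstring, which LangChain passes to the model as the tool description. A short sketch; the template literal and the 'nl' tag are illustrative, not taken from the commit:

# Illustrative template; real templates come from app config.
template = "Answer the question in {language}, citing your sources."

# langcodes resolves 'nl' to 'Dutch'; unresolvable values fall back to the raw tag.
localized = create_language_template(template, 'nl')

# Replaces CitedAnswer's placeholder docstring, so the localized instruction
# is what the model sees as the schema description during tool calling.
set_language_prompt_template(CitedAnswer, localized)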
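select_model_variables assumes tenant.embedding_model packs provider and model into one dotted string and splits it on the last dot. A sketch of the expected shape; the example value is an assumption consistent with the 'text-embedding-3-small' case above:

# Assumed example value; the code only fixes the '<provider>.<model>' shape.
embedding_model_field = 'openai.text-embedding-3-small'

provider, model = embedding_model_field.rsplit('.', 1)
assert provider == 'openai'
assert model == 'text-embedding-3-small'

Because rsplit splits on the last dot, model identifiers stored this way cannot themselves contain dots, which may explain the hyphenated 'gpt-3-5-turbo' spelling in the chat-model match.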