from flask import current_app
|
|
from langchain.embeddings import OpenAIEmbeddings
|
|
from langchain.chat_models import ChatOpenAI
|
|
from langchain.prompts import ChatPromptTemplate
|
|
import ast
|
|
|
|
from common.models.document import EmbeddingSmallOpenAI
|
|
|
|
|
|
def select_model_variables(tenant):
|
|
embedding_provider = tenant.embedding_model.rsplit('.', 1)[0]
|
|
embedding_model = tenant.embedding_model.rsplit('.', 1)[1]
|
|
|
|
llm_provider = tenant.llm_model.rsplit('.', 1)[0]
|
|
llm_model = tenant.llm_model.rsplit('.', 1)[1]
|
|
|
|
# Set model variables
|
|
model_variables = {}
|
|
if tenant.es_k:
|
|
model_variables['k'] = tenant.es_k
|
|
else:
|
|
model_variables['k'] = 5
|
|
|
|
if tenant.es_similarity_threshold:
|
|
model_variables['similarity_threshold'] = tenant.es_similarity_threshold
|
|
else:
|
|
model_variables['similarity_threshold'] = 0.7
|
|
|
|
if tenant.chat_RAG_temperature:
|
|
model_variables['RAG_temperature'] = tenant.chat_RAG_temperature
|
|
else:
|
|
model_variables['RAG_temperature'] = 0.3
|
|
|
|
if tenant.chat_no_RAG_temperature:
|
|
model_variables['no_RAG_temperature'] = tenant.chat_no_RAG_temperature
|
|
else:
|
|
model_variables['no_RAG_temperature'] = 0.5
|
|
|
|
# Set Tuning variables
|
|
if tenant.embed_tuning:
|
|
model_variables['embed_tuning'] = tenant.embed_tuning
|
|
else:
|
|
model_variables['embed_tuning'] = False
|
|
|
|
if tenant.rag_tuning:
|
|
model_variables['rag_tuning'] = tenant.rag_tuning
|
|
else:
|
|
model_variables['rag_tuning'] = False
|
|
|
|
# Set HTML Chunking Variables
|
|
model_variables['html_tags'] = tenant.html_tags
|
|
model_variables['html_end_tags'] = tenant.html_end_tags
|
|
model_variables['html_included_elements'] = tenant.html_included_elements
|
|
model_variables['html_excluded_elements'] = tenant.html_excluded_elements
|
|
|
|
# Set Embedding variables
|
|
match embedding_provider:
|
|
case 'openai':
|
|
match embedding_model:
|
|
case 'text-embedding-3-small':
|
|
api_key = current_app.config.get('OPENAI_API_KEY')
|
|
model_variables['embedding_model'] = OpenAIEmbeddings(api_key=api_key,
|
|
model='text-embedding-3-small')
|
|
model_variables['embedding_db_model'] = EmbeddingSmallOpenAI
|
|
model_variables['min_chunk_size'] = current_app.config.get('OAI_TE3S_MIN_CHUNK_SIZE')
|
|
model_variables['max_chunk_size'] = current_app.config.get('OAI_TE3S_MAX_CHUNK_SIZE')
|
|
case _:
|
|
raise Exception(f'Error setting model variables for tenant {tenant.id} '
|
|
f'error: Invalid embedding model')
|
|
case _:
|
|
raise Exception(f'Error setting model variables for tenant {tenant.id} '
|
|
f'error: Invalid embedding provider')
|
|
|
|
# Set Chat model variables
|
|
match llm_provider:
|
|
case 'openai':
|
|
api_key = current_app.config.get('OPENAI_API_KEY')
|
|
model_variables['llm'] = ChatOpenAI(api_key=api_key,
|
|
model=llm_model,
|
|
temperature=model_variables['RAG_temperature'])
|
|
match llm_model:
|
|
case 'gpt-4-turbo' | 'gpt-4o':
|
|
summary_template = current_app.config.get('GPT4_SUMMARY_TEMPLATE')
|
|
rag_template = current_app.config.get('GPT4_RAG_TEMPLATE')
|
|
case 'gpt-3-5-turbo':
|
|
summary_template = current_app.config.get('GPT3_5_SUMMARY_TEMPLATE')
|
|
rag_template = current_app.config.get('GPT3_5_RAG_TEMPLATE')
|
|
case _:
|
|
raise Exception(f'Error setting model variables for tenant {tenant.id} '
|
|
f'error: Invalid chat model')
|
|
model_variables['summary_prompt'] = ChatPromptTemplate.from_template(summary_template)
|
|
model_variables['rag_prompt'] = ChatPromptTemplate.from_template(rag_template)
|
|
case _:
|
|
raise Exception(f'Error setting model variables for tenant {tenant.id} '
|
|
f'error: Invalid chat provider')
|
|
|
|
return model_variables
|