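"""Per-tenant model configuration helpers.

Builds the `model_variables` dict (retrieval, chunking, embedding, chat and
transcription settings) from a tenant's configuration. OpenAI traffic is
routed through the Portkey gateway with per-tenant metadata.
"""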
import langcodes
from flask import current_app
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_anthropic import ChatAnthropic
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain.prompts import ChatPromptTemplate
import ast
from typing import List
from openai import OpenAI
# from groq import Groq
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL

from common.models.document import EmbeddingSmallOpenAI, EmbeddingLargeOpenAI


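# NOTE: the placeholder docstring below is overwritten at runtime by
# set_language_prompt_template(), so the description presented to the model
# can be set per language. (Presumably the class is bound as a structured-
# output schema elsewhere, e.g. via `llm.with_structured_output(CitedAnswer)`;
# that usage is an assumption, not shown in this module.)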
class CitedAnswer(BaseModel):
    """Default docstring - to be replaced with actual prompt"""

    answer: str = Field(
        ...,
        description="The answer to the user question, based on the given sources",
    )
    citations: List[int] = Field(
        ...,
        description="The integer IDs of the SPECIFIC sources that were used to generate the answer",
    )
    insufficient_info: bool = Field(
        False,  # Default value is set to False
        description="A boolean indicating whether the given sources were sufficient to generate the answer",
    )


def set_language_prompt_template(cls, language_prompt):
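    """Set `cls.__doc__` to a language-specific prompt.

    Used to swap CitedAnswer's placeholder docstring for the tenant's prompt;
    LangChain uses a Pydantic model's docstring as the schema description.
    """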
    cls.__doc__ = language_prompt


def select_model_variables(tenant):
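    """Assemble the per-tenant `model_variables` dict.

    `tenant.embedding_model` and `tenant.llm_model` are expected in
    '<provider>.<model>' form, e.g. 'openai.text-embedding-3-small'
    or 'openai.gpt-4o'.

    Raises:
        ValueError: if the embedding/chat provider or model is unsupported.
    """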
    embedding_provider, embedding_model = tenant.embedding_model.rsplit('.', 1)
    llm_provider, llm_model = tenant.llm_model.rsplit('.', 1)

    # Set model variables
    model_variables = {}
    model_variables['k'] = tenant.es_k or 5
    model_variables['similarity_threshold'] = tenant.es_similarity_threshold or 0.7
    model_variables['RAG_temperature'] = tenant.chat_RAG_temperature or 0.3
    model_variables['no_RAG_temperature'] = tenant.chat_no_RAG_temperature or 0.5

    # Set Tuning variables
    model_variables['embed_tuning'] = tenant.embed_tuning or False
    model_variables['rag_tuning'] = tenant.rag_tuning or False
    model_variables['rag_context'] = tenant.rag_context or " "

    # Set HTML Chunking Variables
    model_variables['html_tags'] = tenant.html_tags
    model_variables['html_end_tags'] = tenant.html_end_tags
    model_variables['html_included_elements'] = tenant.html_included_elements
    model_variables['html_excluded_elements'] = tenant.html_excluded_elements

    # Set Chunk Size variables
    model_variables['min_chunk_size'] = tenant.min_chunk_size
    model_variables['max_chunk_size'] = tenant.max_chunk_size

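    # OpenAI requests below are routed through the Portkey gateway, with the
    # tenant id attached as metadata so usage can be attributed per tenant.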
    # Set Embedding variables
    match embedding_provider:
        case 'openai':
            portkey_metadata = {'tenant_id': str(tenant.id)}
            portkey_headers = createHeaders(api_key=current_app.config.get('PORTKEY_API_KEY'),
                                            provider='openai',
                                            metadata=portkey_metadata)
            match embedding_model:
                case 'text-embedding-3-small':
                    api_key = current_app.config.get('OPENAI_API_KEY')
                    model_variables['embedding_model'] = OpenAIEmbeddings(api_key=api_key,
                                                                          model='text-embedding-3-small',
                                                                          base_url=PORTKEY_GATEWAY_URL,
                                                                          default_headers=portkey_headers)
                    model_variables['embedding_db_model'] = EmbeddingSmallOpenAI
                case 'text-embedding-3-large':
                    api_key = current_app.config.get('OPENAI_API_KEY')
                    model_variables['embedding_model'] = OpenAIEmbeddings(api_key=api_key,
                                                                          model='text-embedding-3-large',
                                                                          base_url=PORTKEY_GATEWAY_URL,
                                                                          default_headers=portkey_headers)
                    model_variables['embedding_db_model'] = EmbeddingLargeOpenAI
                case _:
                    raise ValueError(f'Error setting model variables for tenant {tenant.id}: '
                                     f'invalid embedding model {embedding_model!r}')
        case _:
            raise ValueError(f'Error setting model variables for tenant {tenant.id}: '
                             f'invalid embedding provider {embedding_provider!r}')

    # Set Chat model variables
    tool_calling_supported = False
    match llm_provider:
        case 'openai':
            portkey_metadata = {'tenant_id': str(tenant.id)}
            portkey_headers = createHeaders(api_key=current_app.config.get('PORTKEY_API_KEY'),
                                            metadata=portkey_metadata,
                                            provider='openai')
            api_key = current_app.config.get('OPENAI_API_KEY')
            model_variables['llm'] = ChatOpenAI(api_key=api_key,
                                                model=llm_model,
                                                temperature=model_variables['RAG_temperature'],
                                                base_url=PORTKEY_GATEWAY_URL,
                                                default_headers=portkey_headers)
            model_variables['llm_no_rag'] = ChatOpenAI(api_key=api_key,
                                                       model=llm_model,
                                                       temperature=model_variables['no_RAG_temperature'],
                                                       base_url=PORTKEY_GATEWAY_URL,
                                                       default_headers=portkey_headers)
            match llm_model:
                case 'gpt-4-turbo' | 'gpt-4o':
                    tool_calling_supported = True
                case _:
                    raise ValueError(f'Error setting model variables for tenant {tenant.id}: '
                                     f'invalid chat model {llm_model!r}')
        case 'anthropic':
            api_key = current_app.config.get('ANTHROPIC_API_KEY')
            # Anthropic does not have the same 'generic' model names as OpenAI
            llm_model_ext = current_app.config.get('ANTHROPIC_LLM_VERSIONS').get(llm_model)
            model_variables['llm'] = ChatAnthropic(api_key=api_key,
                                                   model=llm_model_ext,
                                                   temperature=model_variables['RAG_temperature'])
            model_variables['llm_no_rag'] = ChatAnthropic(api_key=api_key,
                                                          model=llm_model_ext,
                                                          temperature=model_variables['no_RAG_temperature'])
            tool_calling_supported = True
        case _:
            raise ValueError(f'Error setting model variables for tenant {tenant.id}: '
                             f'invalid chat provider {llm_provider!r}')

    if tool_calling_supported:
        model_variables['cited_answer_cls'] = CitedAnswer

    templates = current_app.config['PROMPT_TEMPLATES'][f'{llm_provider}.{llm_model}']
    model_variables['summary_template'] = templates['summary']
    model_variables['rag_template'] = templates['rag']
    model_variables['history_template'] = templates['history']
    model_variables['encyclopedia_template'] = templates['encyclopedia']
    model_variables['transcript_template'] = templates['transcript']
    model_variables['html_parse_template'] = templates['html_parse']
    model_variables['pdf_parse_template'] = templates['pdf_parse']

    model_variables['annotation_chunk_length'] = current_app.config['ANNOTATION_TEXT_CHUNK_LENGTH'][tenant.llm_model]

    # Transcription Client Variables.
    # Using Groq
    # api_key = current_app.config.get('GROQ_API_KEY')
    # model_variables['transcription_client'] = Groq(api_key=api_key)
    # model_variables['transcription_model'] = 'whisper-large-v3'

    # Using OpenAI for transcriptions
    portkey_metadata = {'tenant_id': str(tenant.id)}
    portkey_headers = createHeaders(api_key=current_app.config.get('PORTKEY_API_KEY'),
                                    metadata=portkey_metadata,
                                    provider='openai')
    api_key = current_app.config.get('OPENAI_API_KEY')
    model_variables['transcription_client'] = OpenAI(api_key=api_key,
                                                     base_url=PORTKEY_GATEWAY_URL,
                                                     default_headers=portkey_headers)
    model_variables['transcription_model'] = 'whisper-1'

    return model_variables


def create_language_template(template, language):
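    """Return `template` with its '{language}' placeholder filled in.

    Uses the language code's display name (e.g. 'de' -> 'German') when it can
    be resolved; falls back to the raw code otherwise.
    """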
    try:
        full_language = langcodes.Language.make(language=language)
        language_template = template.replace('{language}', full_language.display_name())
    except ValueError:
        language_template = template.replace('{language}', language)

    return language_template


def replace_variable_in_template(template, variable, value):
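    """Replace every occurrence of `variable` with `value` in `template`."""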
    return template.replace(variable, value)
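
# Illustrative usage sketch (not executed here). `app`, `tenant`, and
# `tenant.language` are hypothetical names assumed for the example; the
# functions above require an active Flask application context:
#
#     with app.app_context():
#         model_variables = select_model_variables(tenant)
#         rag_prompt = create_language_template(
#             model_variables['rag_template'], tenant.language)
#         if 'cited_answer_cls' in model_variables:
#             set_language_prompt_template(
#                 model_variables['cited_answer_cls'], rag_prompt)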