- Move from OpenAI to Mistral Embeddings
- Move embedding model settings from tenant to catalog
- BUG: error processing configuration for chunking patterns in HTML_PROCESSOR
- Removed eveai_chat from Docker files and nginx configuration, as it is now obsolete
- BUG: error in Library Operations when creating a new default RAG library
- BUG: added public type in migration scripts
- Removed SocketIO from all code and requirements.txt
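The central schema change is that the provider-qualified embedding name now lives on the catalog rather than the tenant, so two catalogs under one tenant can embed with different models. A minimal sketch of the "<provider>.<model>" convention follows; the Catalog stand-in and its fields are hypothetical, since the real model is not part of this diff:

from dataclasses import dataclass


# Hypothetical Catalog stand-in; the project's actual schema is not shown here.
@dataclass
class Catalog:
    id: int
    tenant_id: int
    embedding_model: str  # provider-qualified: "<provider>.<model>"


def split_embedding_name(catalog: Catalog) -> tuple:
    # Same convention get_embedding_model_and_class uses below.
    return tuple(catalog.embedding_model.split('.'))


catalog = Catalog(id=1, tenant_id=42, embedding_model="mistral.mistral-embed")
assert split_embedding_name(catalog) == ("mistral", "mistral-embed")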
@@ -1,23 +1,25 @@
import os
from typing import Dict, Any, Optional
from typing import Dict, Any, Optional, Tuple

import langcodes
from langchain_core.language_models import BaseChatModel

from common.langchain.llm_metrics_handler import LLMMetricsHandler
from common.langchain.templates.template_manager import TemplateManager
from langchain_openai import OpenAIEmbeddings, ChatOpenAI, OpenAI
from langchain_openai import ChatOpenAI
from langchain_anthropic import ChatAnthropic
from langchain_mistralai import ChatMistralAI
from flask import current_app
from datetime import datetime as dt, timezone as tz

from common.langchain.tracked_openai_embeddings import TrackedOpenAIEmbeddings
from common.eveai_model.tracked_mistral_embeddings import TrackedMistralAIEmbeddings
from common.langchain.tracked_transcription import TrackedOpenAITranscription
from common.models.user import Tenant
from common.utils.cache.base import CacheHandler
from config.model_config import MODEL_CONFIG
from common.extensions import template_manager, cache_manager
from common.models.document import EmbeddingLargeOpenAI, EmbeddingSmallOpenAI
from common.utils.eveai_exceptions import EveAITenantNotFound
from common.extensions import template_manager
from common.models.document import EmbeddingMistral
from common.utils.eveai_exceptions import EveAITenantNotFound, EveAIInvalidEmbeddingModel


llm_model_cache: Dict[Tuple[str, float], BaseChatModel] = {}
llm_metrics_handler = LLMMetricsHandler()

def create_language_template(template: str, language: str) -> str:
@@ -55,6 +57,63 @@ def replace_variable_in_template(template: str, variable: str, value: str) -> str:
    return template.replace(variable, value or "")


def get_embedding_model_and_class(tenant_id, catalog_id, full_embedding_name):
    """
    Retrieve the embedding model and the embedding model class used to store embeddings.

    Args:
        tenant_id: ID of the tenant
        catalog_id: ID of the catalog
        full_embedding_name: The full name of the embedding model: <provider>.<model>

    Returns:
        embedding_model, embedding_model_class
    """
    embedding_provider, embedding_model_name = full_embedding_name.split('.')

    # Determine the embedding model to be used
    if embedding_provider == "mistral":
        api_key = current_app.config['MISTRAL_API_KEY']
        embedding_model = TrackedMistralAIEmbeddings(
            api_key=api_key,  # previously assigned but never passed; assumed accepted, mirroring MistralAIEmbeddings
            model=embedding_model_name
        )
    else:
        raise EveAIInvalidEmbeddingModel(tenant_id, catalog_id)

    # Determine the embedding model class used to store embeddings
    if embedding_model_name == "mistral-embed":
        embedding_model_class = EmbeddingMistral
    else:
        raise EveAIInvalidEmbeddingModel(tenant_id, catalog_id)

    return embedding_model, embedding_model_class

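A usage sketch for the new resolver, assuming a Flask app context with MISTRAL_API_KEY configured and that the tracked wrapper keeps the standard LangChain embeddings interface; the IDs below are illustrative:

# Hypothetical usage; runs inside an app/request context.
embedding_model, embedding_model_class = get_embedding_model_and_class(
    tenant_id=42,
    catalog_id=7,
    full_embedding_name="mistral.mistral-embed",
)
# embed_documents() is the standard LangChain Embeddings method the wrapper
# presumably exposes; rows are then stored via embedding_model_class.
vectors = embedding_model.embed_documents(["chunk one", "chunk two"])
# Any other provider or model name raises EveAIInvalidEmbeddingModel(tenant_id, catalog_id).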
def get_llm(full_model_name, temperature):
    if not full_model_name:
        full_model_name = 'openai.gpt-4o'  # Default to gpt-4o for now, as this is the model originally developed against

    llm = llm_model_cache.get((full_model_name, temperature))
    if not llm:
        llm_provider, llm_model_name = full_model_name.split('.')
        if llm_provider == "openai":
            llm = ChatOpenAI(
                api_key=current_app.config['OPENAI_API_KEY'],
                model=llm_model_name,
                temperature=temperature,
                callbacks=[llm_metrics_handler]
            )
        elif llm_provider == "mistral":
            llm = ChatMistralAI(
                api_key=current_app.config['MISTRAL_API_KEY'],
                model=llm_model_name,
                temperature=temperature,
                callbacks=[llm_metrics_handler]
            )

        # NOTE: an unrecognised provider leaves llm as None, which is then cached.
        llm_model_cache[(full_model_name, temperature)] = llm

    return llm

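Because llm_model_cache is keyed on the (full_model_name, temperature) tuple, the same model at two temperatures yields two separate instances. A short sketch, with an illustrative model name and an app context assumed:

llm_a = get_llm("mistral.mistral-large-latest", 0.2)
llm_b = get_llm("mistral.mistral-large-latest", 0.2)
assert llm_a is llm_b          # same key -> cached instance reused
llm_c = get_llm("mistral.mistral-large-latest", 0.7)
assert llm_c is not llm_a      # different temperature -> separate cache entry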
class ModelVariables:
    """Manages model-related variables and configurations"""

@@ -63,15 +122,13 @@ class ModelVariables:
        Initialize ModelVariables with tenant and optional template manager

        Args:
            tenant: Tenant instance
            template_manager: Optional TemplateManager instance
            tenant_id: ID of the tenant
            variables: Optional dict of preset variables
        """
        current_app.logger.info(f'Model variables initialized with tenant {tenant_id} and variables \n{variables}')
        self.tenant_id = tenant_id
        self._variables = variables if variables is not None else self._initialize_variables()
        current_app.logger.info(f'Model _variables initialized to {self._variables}')
        self._embedding_model = None
        self._embedding_model_class = None
        self._llm_instances = {}
        self.llm_metrics_handler = LLMMetricsHandler()
        self._transcription_model = None

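A construction sketch matching the (tenant_id, variables) signature documented above; the def line is outside this hunk, so the keyword names and the values shown are assumptions for illustration:

# Hypothetical: pass variables explicitly so _initialize_variables() is skipped.
mv = ModelVariables(
    tenant_id=42,
    variables={
        'llm_provider': 'mistral',
        'llm_model': 'mistral-large-latest',
        'llm_full_model': 'mistral.mistral-large-latest',
    },
)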
@@ -85,7 +142,6 @@ class ModelVariables:
            raise EveAITenantNotFound(self.tenant_id)

        # Set model providers
        variables['embedding_provider'], variables['embedding_model'] = tenant.embedding_model.split('.')
        variables['llm_provider'], variables['llm_model'] = tenant.llm_model.split('.')
        variables['llm_full_model'] = tenant.llm_model

@@ -102,28 +158,6 @@ class ModelVariables:
        return variables

    @property
    def embedding_model(self):
        """Get the embedding model instance"""
        if self._embedding_model is None:
            api_key = os.getenv('OPENAI_API_KEY')
            self._embedding_model = TrackedOpenAIEmbeddings(
                api_key=api_key,
                model=self._variables['embedding_model']
            )
        return self._embedding_model

    @property
    def embedding_model_class(self):
        """Get the embedding model class"""
        if self._embedding_model_class is None:
            if self._variables['embedding_model'] == 'text-embedding-3-large':
                self._embedding_model_class = EmbeddingLargeOpenAI
            else:  # text-embedding-3-small
                self._embedding_model_class = EmbeddingSmallOpenAI

        return self._embedding_model_class

    @property
    def annotation_chunk_length(self):
        return self._variables['annotation_chunk_length']