- Move to Mistral instead of OpenAI as primary choice
This commit is contained in:
@@ -3,7 +3,7 @@ from langchain_core.output_parsers import StrOutputParser
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
from langchain_core.runnables import RunnablePassthrough
|
||||
from common.extensions import db, minio_client
|
||||
from common.utils.model_utils import create_language_template
|
||||
from common.utils.model_utils import create_language_template, get_embedding_llm
|
||||
from .base_processor import BaseProcessor
|
||||
from common.utils.business_event_context import current_event
|
||||
from .processor_registry import ProcessorRegistry
|
||||
@@ -81,7 +81,7 @@ class HTMLProcessor(BaseProcessor):
|
||||
def _generate_markdown_from_html(self, html_content):
|
||||
self._log(f'Generating markdown from HTML for tenant {self.tenant.id}')
|
||||
|
||||
llm = self.model_variables.get_llm()
|
||||
llm = get_embedding_llm()
|
||||
template = self.model_variables.get_template("html_parse")
|
||||
parse_prompt = ChatPromptTemplate.from_template(template)
|
||||
setup = RunnablePassthrough()
|
||||
|
||||
@@ -8,7 +8,7 @@ import re
|
||||
from langchain_core.runnables import RunnablePassthrough
|
||||
|
||||
from common.extensions import minio_client
|
||||
from common.utils.model_utils import create_language_template
|
||||
from common.utils.model_utils import create_language_template, get_embedding_llm
|
||||
from .base_processor import BaseProcessor
|
||||
from common.utils.business_event_context import current_event
|
||||
from .processor_registry import ProcessorRegistry
|
||||
@@ -210,7 +210,7 @@ class PDFProcessor(BaseProcessor):
|
||||
return text_splitter.split_text(content)
|
||||
|
||||
def _process_chunks_with_llm(self, chunks):
|
||||
llm = self.model_variables.get_llm()
|
||||
llm = get_embedding_llm()
|
||||
template = self.model_variables.get_template('pdf_parse')
|
||||
pdf_prompt = ChatPromptTemplate.from_template(template)
|
||||
setup = RunnablePassthrough()
|
||||
|
||||
@@ -4,7 +4,7 @@ from langchain_core.output_parsers import StrOutputParser
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
from langchain_core.runnables import RunnablePassthrough
|
||||
|
||||
from common.utils.model_utils import create_language_template
|
||||
from common.utils.model_utils import create_language_template, get_embedding_llm
|
||||
from .base_processor import BaseProcessor
|
||||
from common.utils.business_event_context import current_event
|
||||
|
||||
@@ -46,7 +46,7 @@ class TranscriptionBaseProcessor(BaseProcessor):
|
||||
|
||||
def _process_chunks(self, chunks):
|
||||
self.log_tuning("_process_chunks", {"Nr of Chunks": len(chunks)})
|
||||
llm = self.model_variables.get_llm()
|
||||
llm = get_embedding_llm()
|
||||
template = self.model_variables.get_template('transcript')
|
||||
language_template = create_language_template(template, self.document_version.language)
|
||||
transcript_prompt = ChatPromptTemplate.from_template(language_template)
|
||||
|
||||
Reference in New Issue
Block a user