- Add support for a default dictionary for configuration fields

- Correct entitlement processing
- Remove the get_template functionality from ModelVariables; templates are now defined directly alongside the LLM model definition in the configuration file.
Josako committed 2025-05-19 14:10:09 +02:00
parent d2a9092f46
commit 28aea85b10
15 changed files with 386 additions and 85 deletions
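
All three diffs below make the same change: the old two-step pattern (fetch an LLM via get_embedding_llm, then look the template up on ModelVariables) becomes a single get_template call that returns both the prompt template and the LLM defined for it in configuration. A minimal sketch of what such a helper could look like; the configuration layout, field names, and the ChatOpenAI stand-in are illustrative assumptions, not code from this commit — only the signature (get_template(name) returning a (template, llm) pair) is confirmed by the diffs:

    # Hypothetical sketch of the new helper in common/utils/model_utils.py.
    from langchain_openai import ChatOpenAI  # stand-in for whichever provider the project uses

    LLM_DEFINITIONS = {
        # One entry per template; "pdf_parse" and "transcript" would follow the same shape.
        "html_parse": {
            "template": "Convert the following HTML into clean Markdown:\n\n{html}",
            "model": "gpt-4o-mini",
            "temperature": 0.0,
        },
    }

    def get_template(name):
        """Return the prompt template and the LLM configured alongside it."""
        entry = LLM_DEFINITIONS[name]
        llm = ChatOpenAI(model=entry["model"], temperature=entry["temperature"])
        return entry["template"], llm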


@@ -3,7 +3,7 @@ from langchain_core.output_parsers import StrOutputParser
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_core.runnables import RunnablePassthrough
 from common.extensions import db, minio_client
-from common.utils.model_utils import create_language_template, get_embedding_llm
+from common.utils.model_utils import create_language_template, get_embedding_llm, get_template
 from .base_processor import BaseProcessor
 from common.utils.business_event_context import current_event
 from .processor_registry import ProcessorRegistry
@@ -81,8 +81,7 @@ class HTMLProcessor(BaseProcessor):
 
     def _generate_markdown_from_html(self, html_content):
         self._log(f'Generating markdown from HTML for tenant {self.tenant.id}')
-        llm = get_embedding_llm()
-        template = self.model_variables.get_template("html_parse")
+        template, llm = get_template("html_parse")
        parse_prompt = ChatPromptTemplate.from_template(template)
        setup = RunnablePassthrough()
        output_parser = StrOutputParser()


@@ -9,7 +9,7 @@ from langchain_core.runnables import RunnablePassthrough
 from common.eveai_model.tracked_mistral_ocr_client import TrackedMistralOcrClient
 from common.extensions import minio_client
-from common.utils.model_utils import create_language_template, get_embedding_llm
+from common.utils.model_utils import create_language_template, get_embedding_llm, get_template
 from .base_processor import BaseProcessor
 from common.utils.business_event_context import current_event
 from .processor_registry import ProcessorRegistry
@@ -208,8 +208,7 @@ class PDFProcessor(BaseProcessor):
         return text_splitter.split_text(content)
 
     def _process_chunks_with_llm(self, chunks):
-        llm = get_embedding_llm()
-        template = self.model_variables.get_template('pdf_parse')
+        template, llm = get_template('pdf_parse')
        pdf_prompt = ChatPromptTemplate.from_template(template)
        setup = RunnablePassthrough()
        output_parser = StrOutputParser()


@@ -4,7 +4,7 @@ from langchain_core.output_parsers import StrOutputParser
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_core.runnables import RunnablePassthrough
-from common.utils.model_utils import create_language_template, get_embedding_llm
+from common.utils.model_utils import create_language_template, get_embedding_llm, get_template
 from .base_processor import BaseProcessor
 from common.utils.business_event_context import current_event
@@ -46,8 +46,7 @@ class TranscriptionBaseProcessor(BaseProcessor):
 
     def _process_chunks(self, chunks):
         self.log_tuning("_process_chunks", {"Nr of Chunks": len(chunks)})
-        llm = get_embedding_llm()
-        template = self.model_variables.get_template('transcript')
+        template, llm = get_template('transcript')
        language_template = create_language_template(template, self.document_version.language)
        transcript_prompt = ChatPromptTemplate.from_template(language_template)
        setup = RunnablePassthrough()
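
The first bullet of the commit message, a default dictionary for configuration fields, is not visible in the hunks above; it would sit on the configuration side. A plausible sketch of how such defaults are commonly merged, with illustrative names only:

    # Illustrative only: fill in missing configuration fields from a default
    # dictionary, so each configuration entry only overrides what differs.
    DEFAULT_FIELDS = {"temperature": 0.0, "max_tokens": 2048}

    def with_defaults(entry, defaults=DEFAULT_FIELDS):
        merged = dict(defaults)  # start from the defaults...
        merged.update(entry)     # ...and let explicitly set fields win
        return merged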