- Translation Service improvement
- Enable activation / deactivation of Processors
- Renew API keys for Mistral (leading to workspaces)
- Align all Document views to use a session catalog
- Allow for different processors for the same file type
43 lines
1.5 KiB
Python
43 lines
1.5 KiB
Python
import xxhash
|
|
import json
|
|
|
|
from langchain_core.output_parsers import StrOutputParser
|
|
from langchain_core.prompts import ChatPromptTemplate
|
|
from langchain_core.runnables import RunnablePassthrough
|
|
|
|
from common.langchain.persistent_llm_metrics_handler import PersistentLLMMetricsHandler
|
|
from common.utils.model_utils import get_template, replace_variable_in_template
|
|
|
|
class TranslationService:
    """Translates text through an LLM chain while capturing token-usage metrics.

    The prompt template and model are resolved per call via ``get_template``,
    choosing a context-aware or plain translation template.
    """

    def __init__(self, tenant_id):
        # Tenant scoping; stored for downstream use (not referenced in this class body).
        self.tenant_id = tenant_id

    def translate_text(self, text_to_translate: str, target_lang: str, source_lang: str = None,
                       context: str = None) -> tuple[str, dict[str, int | float]]:
        """Translate ``text_to_translate`` into ``target_lang``.

        Args:
            text_to_translate: The source text to translate.
            target_lang: Target language identifier passed to the prompt template.
            source_lang: Currently unused. NOTE(review): accepted but never forwarded
                to the prompt — confirm whether the templates should receive it.
            context: Optional surrounding context; when given, the
                "translation_with_context" template is used instead of the plain one.

        Returns:
            A ``(translation, metrics)`` tuple where ``metrics`` is the usage data
            collected by ``PersistentLLMMetricsHandler`` during the LLM call.
        """
        prompt_params = {
            "text_to_translate": text_to_translate,
            "target_lang": target_lang,
        }
        if context:
            template, llm = get_template("translation_with_context")
            prompt_params["context"] = context
        else:
            template, llm = get_template("translation_without_context")

        # Attach a metrics handler to capture usage for this call.
        # - ``llm.callbacks`` may be None (TypeError with the old ``+`` concat).
        # - If ``get_template`` returns a shared/cached model, handlers from previous
        #   calls would otherwise accumulate; drop stale metrics handlers first.
        metrics_handler = PersistentLLMMetricsHandler()
        existing_callbacks = [
            cb for cb in (llm.callbacks or [])
            if not isinstance(cb, PersistentLLMMetricsHandler)
        ]
        llm.callbacks = existing_callbacks + [metrics_handler]

        translation_prompt = ChatPromptTemplate.from_template(template)
        setup = RunnablePassthrough()
        chain = (setup | translation_prompt | llm | StrOutputParser())

        translation = chain.invoke(prompt_params)

        metrics = metrics_handler.get_metrics()
        return translation, metrics