Compare commits
6 Commits: v1.0.8-alf...v1.0.9-alf
| Author | SHA1 | Date |
|---|---|---|
|  | 883175b8f5 |  |
|  | ae697df4c9 |  |
|  | d9cb00fcdc |  |
|  | ee1b0f1cfa |  |
|  | a740c96630 |  |
|  | 67bdeac434 |  |
CHANGELOG.md (13 lines changed)
@@ -25,6 +25,19 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Security

 - In case of vulnerabilities.

+## [1.0.8-alfa] - 2024-09-12
+
+### Added
+
+- Tenant type defined to allow for active, inactive, demo ... tenants
+- Search and filtering functionality on Tenants
+- Implementation of health checks (1st version)
+- Provision for Prometheus monitoring (no implementation yet)
+- Refine audio_processor and srt_processor to reduce duplicate code and support larger files
+- Introduction of repopack to reason in LLMs about the code
+
+### Fixed
+
+- Refine audio_processor and srt_processor to reduce duplicate code and support larger files
+
 ## [1.0.7-alfa] - 2024-09-12

 ### Added
@@ -1,23 +1,31 @@
 from langchain_core.retrievers import BaseRetriever
 from sqlalchemy import asc
 from sqlalchemy.exc import SQLAlchemyError
-from pydantic import BaseModel, Field
+from pydantic import Field, BaseModel, PrivateAttr
 from typing import Any, Dict
 from flask import current_app

 from common.extensions import db
 from common.models.interaction import ChatSession, Interaction
-from common.utils.datetime_utils import get_date_in_timezone
+from common.utils.model_utils import ModelVariables


-class EveAIHistoryRetriever(BaseRetriever):
-    model_variables: Dict[str, Any] = Field(...)
-    session_id: str = Field(...)
+class EveAIHistoryRetriever(BaseRetriever, BaseModel):
+    _model_variables: ModelVariables = PrivateAttr()
+    _session_id: str = PrivateAttr()

-    def __init__(self, model_variables: Dict[str, Any], session_id: str):
+    def __init__(self, model_variables: ModelVariables, session_id: str):
         super().__init__()
-        self.model_variables = model_variables
-        self.session_id = session_id
+        self._model_variables = model_variables
+        self._session_id = session_id
+
+    @property
+    def model_variables(self) -> ModelVariables:
+        return self._model_variables
+
+    @property
+    def session_id(self) -> str:
+        return self._session_id

     def _get_relevant_documents(self, query: str):
         current_app.logger.debug(f'Retrieving history of interactions for query: {query}')
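The switch from public `Field(...)` attributes to `PrivateAttr` plus read-only properties is what lets the retriever carry a `ModelVariables` instance, which pydantic cannot validate as a field type. A minimal sketch of the same pattern in isolation (`Holder` and `payload` are hypothetical names, not part of this change):

```python
from pydantic import BaseModel, PrivateAttr

class Holder(BaseModel):
    # Private attributes are exempt from pydantic validation, so they can
    # carry arbitrary objects that have no pydantic-compatible type.
    _payload: object = PrivateAttr()

    def __init__(self, payload: object):
        super().__init__()
        self._payload = payload

    @property
    def payload(self) -> object:
        # Read-only access; callers keep writing `obj.payload` as before.
        return self._payload

h = Holder(payload={"anything": "goes"})
print(h.payload)  # {'anything': 'goes'}
```

The properties preserve the old public attribute names, so call sites such as `self.model_variables['k']` keep working unchanged.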
@@ -1,30 +1,39 @@
 from langchain_core.retrievers import BaseRetriever
 from sqlalchemy import func, and_, or_, desc
 from sqlalchemy.exc import SQLAlchemyError
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, PrivateAttr
 from typing import Any, Dict
 from flask import current_app

 from common.extensions import db
 from common.models.document import Document, DocumentVersion
 from common.utils.datetime_utils import get_date_in_timezone
+from common.utils.model_utils import ModelVariables


-class EveAIRetriever(BaseRetriever):
-    model_variables: Dict[str, Any] = Field(...)
-    tenant_info: Dict[str, Any] = Field(...)
+class EveAIRetriever(BaseRetriever, BaseModel):
+    _model_variables: ModelVariables = PrivateAttr()
+    _tenant_info: Dict[str, Any] = PrivateAttr()

-    def __init__(self, model_variables: Dict[str, Any], tenant_info: Dict[str, Any]):
+    def __init__(self, model_variables: ModelVariables, tenant_info: Dict[str, Any]):
         super().__init__()
-        self.model_variables = model_variables
-        self.tenant_info = tenant_info
+        current_app.logger.debug(f'Model variables type: {type(model_variables)}')
+        self._model_variables = model_variables
+        self._tenant_info = tenant_info
+
+    @property
+    def model_variables(self) -> ModelVariables:
+        return self._model_variables
+
+    @property
+    def tenant_info(self) -> Dict[str, Any]:
+        return self._tenant_info

     def _get_relevant_documents(self, query: str):
         current_app.logger.debug(f'Retrieving relevant documents for query: {query}')
         query_embedding = self._get_query_embedding(query)
+        current_app.logger.debug(f'Model Variables Private: {type(self._model_variables)}')
+        current_app.logger.debug(f'Model Variables Property: {type(self.model_variables)}')
         db_class = self.model_variables['embedding_db_model']
         similarity_threshold = self.model_variables['similarity_threshold']
         k = self.model_variables['k']
common/models/monitoring.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+from common.extensions import db
+
+
+class BusinessEventLog(db.Model):
+    __bind_key__ = 'public'
+    __table_args__ = {'schema': 'public'}
+
+    id = db.Column(db.Integer, primary_key=True)
+    timestamp = db.Column(db.DateTime, nullable=False)
+    event_type = db.Column(db.String(50), nullable=False)
+    tenant_id = db.Column(db.Integer, nullable=False)
+    trace_id = db.Column(db.String(50), nullable=False)
+    span_id = db.Column(db.String(50))
+    span_name = db.Column(db.String(50))
+    parent_span_id = db.Column(db.String(50))
+    document_version_id = db.Column(db.Integer)
+    chat_session_id = db.Column(db.String(50))
+    interaction_id = db.Column(db.Integer)
+    environment = db.Column(db.String(20))
+    message = db.Column(db.Text)
+    # Add any other fields relevant for invoicing or warnings
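The closing comment signals that this table is meant to feed invoicing and warnings. As a purely hypothetical illustration of that direction (the query and filter choices below are not part of this change), per-tenant usage could be aggregated like this:

```python
from sqlalchemy import func
from common.extensions import db
from common.models.monitoring import BusinessEventLog

# Hypothetical: count events per tenant and event type for a billing period.
rows = (db.session.query(BusinessEventLog.tenant_id,
                         BusinessEventLog.event_type,
                         func.count(BusinessEventLog.id))
        .filter(BusinessEventLog.environment == 'production')
        .group_by(BusinessEventLog.tenant_id, BusinessEventLog.event_type)
        .all())
```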
@@ -2,7 +2,6 @@ from common.extensions import db
 from flask_security import UserMixin, RoleMixin
 from sqlalchemy.dialects.postgresql import ARRAY
 import sqlalchemy as sa
-from sqlalchemy import CheckConstraint


 class Tenant(db.Model):
common/utils/business_event.py (new file, 114 lines)
@@ -0,0 +1,114 @@
+import os
+import uuid
+from contextlib import contextmanager
+from datetime import datetime
+from typing import Dict, Any, Optional
+from datetime import datetime as dt, timezone as tz
+from portkey_ai import Portkey, Config
+import logging
+
+from .business_event_context import BusinessEventContext
+from common.models.monitoring import BusinessEventLog
+from common.extensions import db
+
+
+class BusinessEvent:
+    # The BusinessEvent class itself is a context manager, but it doesn't use the @contextmanager decorator.
+    # Instead, it defines __enter__ and __exit__ methods explicitly. This is because we're doing something a bit more
+    # complex - we're interacting with the BusinessEventContext and the _business_event_stack.
+
+    def __init__(self, event_type: str, tenant_id: int, **kwargs):
+        self.event_type = event_type
+        self.tenant_id = tenant_id
+        self.trace_id = str(uuid.uuid4())
+        self.span_id = None
+        self.span_name = None
+        self.parent_span_id = None
+        self.document_version_id = kwargs.get('document_version_id')
+        self.chat_session_id = kwargs.get('chat_session_id')
+        self.interaction_id = kwargs.get('interaction_id')
+        self.environment = os.environ.get("FLASK_ENV", "development")
+        self.span_counter = 0
+        self.spans = []
+
+    def update_attribute(self, attribute: str, value: any):
+        if hasattr(self, attribute):
+            setattr(self, attribute, value)
+        else:
+            raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{attribute}'")
+
+    @contextmanager
+    def create_span(self, span_name: str):
+        # The create_span method is designed to be used as a context manager. We want to perform some actions when
+        # entering the span (like setting the span ID and name) and some actions when exiting the span (like removing
+        # these temporary attributes). The @contextmanager decorator allows us to write this method in a way that
+        # clearly separates the "entry" and "exit" logic, with the yield statement in between.
+
+        parent_span_id = self.span_id
+        self.span_counter += 1
+        new_span_id = str(uuid.uuid4())
+
+        # Save the current span info
+        self.spans.append((self.span_id, self.span_name, self.parent_span_id))
+
+        # Set the new span info
+        self.span_id = new_span_id
+        self.span_name = span_name
+        self.parent_span_id = parent_span_id
+
+        self.log(f"Starting span {span_name}")
+
+        try:
+            yield
+        finally:
+            self.log(f"Ending span {span_name}")
+            # Restore the previous span info
+            if self.spans:
+                self.span_id, self.span_name, self.parent_span_id = self.spans.pop()
+            else:
+                self.span_id = None
+                self.span_name = None
+                self.parent_span_id = None
+
+    def log(self, message: str, level: str = 'info'):
+        logger = logging.getLogger('business_events')
+        log_data = {
+            'event_type': self.event_type,
+            'tenant_id': self.tenant_id,
+            'trace_id': self.trace_id,
+            'span_id': self.span_id,
+            'span_name': self.span_name,
+            'parent_span_id': self.parent_span_id,
+            'document_version_id': self.document_version_id,
+            'chat_session_id': self.chat_session_id,
+            'interaction_id': self.interaction_id,
+            'environment': self.environment
+        }
+        # Log to Graylog
+        getattr(logger, level)(message, extra=log_data)
+
+        # Log to database
+        event_log = BusinessEventLog(
+            timestamp=dt.now(tz=tz.utc),
+            event_type=self.event_type,
+            tenant_id=self.tenant_id,
+            trace_id=self.trace_id,
+            span_id=self.span_id,
+            span_name=self.span_name,
+            parent_span_id=self.parent_span_id,
+            document_version_id=self.document_version_id,
+            chat_session_id=self.chat_session_id,
+            interaction_id=self.interaction_id,
+            environment=self.environment,
+            message=message
+        )
+        db.session.add(event_log)
+        db.session.commit()
+
+    def __enter__(self):
+        self.log(f'Starting Trace for {self.event_type}')
+        return BusinessEventContext(self).__enter__()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.log(f'Ending Trace for {self.event_type}')
+        return BusinessEventContext(self).__exit__(exc_type, exc_val, exc_tb)
common/utils/business_event_context.py (new file, 25 lines)
@@ -0,0 +1,25 @@
+from werkzeug.local import LocalProxy, LocalStack
+
+_business_event_stack = LocalStack()
+
+
+def _get_current_event():
+    top = _business_event_stack.top
+    if top is None:
+        raise RuntimeError("No business event context found. Are you sure you're in a business event?")
+    return top
+
+
+current_event = LocalProxy(_get_current_event)
+
+
+class BusinessEventContext:
+    def __init__(self, event):
+        self.event = event
+
+    def __enter__(self):
+        _business_event_stack.push(self.event)
+        return self.event
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        _business_event_stack.pop()
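Taken together, `BusinessEvent` and `BusinessEventContext` give a context-local trace that `ModelVariables` later reads through `current_event`. A minimal usage sketch, assuming a Flask app context (the event type and IDs below are made up):

```python
from common.utils.business_event import BusinessEvent
from common.utils.business_event_context import current_event

# Hypothetical event type, tenant ID and document version ID.
with BusinessEvent('document_embedding', tenant_id=42, document_version_id=7) as event:
    # Inside the trace, `current_event` resolves to `event` via the LocalStack.
    with event.create_span('chunking'):
        print(current_event.span_name)   # 'chunking'
    with event.create_span('embedding'):
        print(current_event.trace_id)    # same trace_id as the outer event
```

Spans nest: `create_span` saves the current span tuple on `self.spans` and restores it in the `finally` block, so the parent span becomes current again when an inner span exits.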
@@ -23,6 +23,14 @@ def cors_after_request(response, prefix):
     current_app.logger.debug(f'request.args: {request.args}')
     current_app.logger.debug(f'request is json?: {request.is_json}')

+    # Exclude health checks from checks
+    if request.path.startswith('/healthz') or request.path.startswith('/_healthz'):
+        current_app.logger.debug('Skipping CORS headers for health checks')
+        response.headers.add('Access-Control-Allow-Origin', '*')
+        response.headers.add('Access-Control-Allow-Headers', '*')
+        response.headers.add('Access-Control-Allow-Methods', '*')
+        return response
+
     tenant_id = None
     allowed_origins = []
@@ -5,14 +5,16 @@ from flask import current_app
 from langchain_openai import OpenAIEmbeddings, ChatOpenAI
 from langchain_anthropic import ChatAnthropic
 from langchain_core.pydantic_v1 import BaseModel, Field
-from langchain.prompts import ChatPromptTemplate
-import ast
-from typing import List
+from typing import List, Any, Iterator
+from collections.abc import MutableMapping
 from openai import OpenAI
-# from groq import Groq
 from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL
+from portkey_ai.langchain.portkey_langchain_callback_handler import LangchainCallbackHandler

 from common.models.document import EmbeddingSmallOpenAI, EmbeddingLargeOpenAI
+from common.models.user import Tenant
+from config.model_config import MODEL_CONFIG
+from common.utils.business_event_context import current_event


 class CitedAnswer(BaseModel):
||||||
@@ -36,180 +38,264 @@ def set_language_prompt_template(cls, language_prompt):
|
|||||||
cls.__doc__ = language_prompt
|
cls.__doc__ = language_prompt
|
||||||
|
|
||||||
|
|
||||||
|
class ModelVariables(MutableMapping):
|
||||||
|
def __init__(self, tenant: Tenant):
|
||||||
|
self.tenant = tenant
|
||||||
|
self._variables = self._initialize_variables()
|
||||||
|
self._embedding_model = None
|
||||||
|
self._llm = None
|
||||||
|
self._llm_no_rag = None
|
||||||
|
self._transcription_client = None
|
||||||
|
self._prompt_templates = {}
|
||||||
|
self._embedding_db_model = None
|
||||||
|
|
||||||
|
def _initialize_variables(self):
|
||||||
|
variables = {}
|
||||||
|
|
||||||
|
# We initialize the variables that are available knowing the tenant. For the other, we will apply 'lazy loading'
|
||||||
|
variables['k'] = self.tenant.es_k or 5
|
||||||
|
variables['similarity_threshold'] = self.tenant.es_similarity_threshold or 0.7
|
||||||
|
variables['RAG_temperature'] = self.tenant.chat_RAG_temperature or 0.3
|
||||||
|
variables['no_RAG_temperature'] = self.tenant.chat_no_RAG_temperature or 0.5
|
||||||
|
variables['embed_tuning'] = self.tenant.embed_tuning or False
|
||||||
|
variables['rag_tuning'] = self.tenant.rag_tuning or False
|
||||||
|
variables['rag_context'] = self.tenant.rag_context or " "
|
||||||
|
|
||||||
|
# Set HTML Chunking Variables
|
||||||
|
variables['html_tags'] = self.tenant.html_tags
|
||||||
|
variables['html_end_tags'] = self.tenant.html_end_tags
|
||||||
|
variables['html_included_elements'] = self.tenant.html_included_elements
|
||||||
|
variables['html_excluded_elements'] = self.tenant.html_excluded_elements
|
||||||
|
variables['html_excluded_classes'] = self.tenant.html_excluded_classes
|
||||||
|
|
||||||
|
# Set Chunk Size variables
|
||||||
|
variables['min_chunk_size'] = self.tenant.min_chunk_size
|
||||||
|
variables['max_chunk_size'] = self.tenant.max_chunk_size
|
||||||
|
|
||||||
|
# Set model providers
|
||||||
|
variables['embedding_provider'], variables['embedding_model'] = self.tenant.embedding_model.rsplit('.', 1)
|
||||||
|
variables['llm_provider'], variables['llm_model'] = self.tenant.llm_model.rsplit('.', 1)
|
||||||
|
variables["templates"] = current_app.config['PROMPT_TEMPLATES'][(f"{variables['llm_provider']}."
|
||||||
|
f"{variables['llm_model']}")]
|
||||||
|
current_app.logger.info(f"Loaded prompt templates: \n")
|
||||||
|
current_app.logger.info(f"{variables['templates']}")
|
||||||
|
|
||||||
|
# Set model-specific configurations
|
||||||
|
model_config = MODEL_CONFIG.get(variables['llm_provider'], {}).get(variables['llm_model'], {})
|
||||||
|
variables.update(model_config)
|
||||||
|
|
||||||
|
variables['annotation_chunk_length'] = current_app.config['ANNOTATION_TEXT_CHUNK_LENGTH'][self.tenant.llm_model]
|
||||||
|
|
||||||
|
if variables['tool_calling_supported']:
|
||||||
|
variables['cited_answer_cls'] = CitedAnswer
|
||||||
|
|
||||||
|
return variables
|
||||||
|
|
||||||
|
@property
|
||||||
|
def embedding_model(self):
|
||||||
|
portkey_metadata = self.get_portkey_metadata()
|
||||||
|
|
||||||
|
portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'),
|
||||||
|
provider=self._variables['embedding_provider'],
|
||||||
|
metadata=portkey_metadata,
|
||||||
|
trace_id=current_event.trace_id,
|
||||||
|
span_id=current_event.span_id,
|
||||||
|
span_name=current_event.span_name,
|
||||||
|
parent_span_id=current_event.parent_span_id
|
||||||
|
)
|
||||||
|
api_key = os.getenv('OPENAI_API_KEY')
|
||||||
|
model = self._variables['embedding_model']
|
||||||
|
self._embedding_model = OpenAIEmbeddings(api_key=api_key,
|
||||||
|
model=model,
|
||||||
|
base_url=PORTKEY_GATEWAY_URL,
|
||||||
|
default_headers=portkey_headers)
|
||||||
|
self._embedding_db_model = EmbeddingSmallOpenAI \
|
||||||
|
if model == 'text-embedding-3-small' \
|
||||||
|
else EmbeddingLargeOpenAI
|
||||||
|
|
||||||
|
return self._embedding_model
|
||||||
|
|
||||||
|
@property
|
||||||
|
def llm(self):
|
||||||
|
portkey_headers = self.get_portkey_headers_for_llm()
|
||||||
|
api_key = self.get_api_key_for_llm()
|
||||||
|
self._llm = ChatOpenAI(api_key=api_key,
|
||||||
|
model=self._variables['llm_model'],
|
||||||
|
temperature=self._variables['RAG_temperature'],
|
||||||
|
base_url=PORTKEY_GATEWAY_URL,
|
||||||
|
default_headers=portkey_headers)
|
||||||
|
return self._llm
|
||||||
|
|
||||||
|
@property
|
||||||
|
def llm_no_rag(self):
|
||||||
|
portkey_headers = self.get_portkey_headers_for_llm()
|
||||||
|
api_key = self.get_api_key_for_llm()
|
||||||
|
self._llm_no_rag = ChatOpenAI(api_key=api_key,
|
||||||
|
model=self._variables['llm_model'],
|
||||||
|
temperature=self._variables['RAG_temperature'],
|
||||||
|
base_url=PORTKEY_GATEWAY_URL,
|
||||||
|
default_headers=portkey_headers)
|
||||||
|
return self._llm_no_rag
|
||||||
|
|
||||||
|
def get_portkey_headers_for_llm(self):
|
||||||
|
portkey_metadata = self.get_portkey_metadata()
|
||||||
|
portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'),
|
||||||
|
metadata=portkey_metadata,
|
||||||
|
provider=self._variables['llm_provider'],
|
||||||
|
trace_id=current_event.trace_id,
|
||||||
|
span_id=current_event.span_id,
|
||||||
|
span_name=current_event.span_name,
|
||||||
|
parent_span_id=current_event.parent_span_id
|
||||||
|
)
|
||||||
|
return portkey_headers
|
||||||
|
|
||||||
|
def get_portkey_metadata(self):
|
||||||
|
environment = os.getenv('FLASK_ENV', 'development')
|
||||||
|
portkey_metadata = {'tenant_id': str(self.tenant.id),
|
||||||
|
'environment': environment,
|
||||||
|
'trace_id': current_event.trace_id,
|
||||||
|
'span_id': current_event.span_id,
|
||||||
|
'span_name': current_event.span_name,
|
||||||
|
'parent_span_id': current_event.parent_span_id,
|
||||||
|
}
|
||||||
|
return portkey_metadata
|
||||||
|
|
||||||
|
def get_api_key_for_llm(self):
|
||||||
|
if self._variables['llm_provider'] == 'openai':
|
||||||
|
api_key = os.getenv('OPENAI_API_KEY')
|
||||||
|
else: # self._variables['llm_provider'] == 'anthropic'
|
||||||
|
api_key = os.getenv('ANTHROPIC_API_KEY')
|
||||||
|
|
||||||
|
return api_key
|
||||||
|
|
||||||
|
# def _initialize_llm(self):
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# if self._variables['llm_provider'] == 'openai':
|
||||||
|
# portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'),
|
||||||
|
# metadata=portkey_metadata,
|
||||||
|
# provider='openai')
|
||||||
|
#
|
||||||
|
# self._llm = ChatOpenAI(api_key=api_key,
|
||||||
|
# model=self._variables['llm_model'],
|
||||||
|
# temperature=self._variables['RAG_temperature'],
|
||||||
|
# base_url=PORTKEY_GATEWAY_URL,
|
||||||
|
# default_headers=portkey_headers)
|
||||||
|
# self._llm_no_rag = ChatOpenAI(api_key=api_key,
|
||||||
|
# model=self._variables['llm_model'],
|
||||||
|
# temperature=self._variables['no_RAG_temperature'],
|
||||||
|
# base_url=PORTKEY_GATEWAY_URL,
|
||||||
|
# default_headers=portkey_headers)
|
||||||
|
# self._variables['tool_calling_supported'] = self._variables['llm_model'] in ['gpt-4o', 'gpt-4o-mini']
|
||||||
|
# elif self._variables['llm_provider'] == 'anthropic':
|
||||||
|
# api_key = os.getenv('ANTHROPIC_API_KEY')
|
||||||
|
# llm_model_ext = os.getenv('ANTHROPIC_LLM_VERSIONS', {}).get(self._variables['llm_model'])
|
||||||
|
# self._llm = ChatAnthropic(api_key=api_key,
|
||||||
|
# model=llm_model_ext,
|
||||||
|
# temperature=self._variables['RAG_temperature'])
|
||||||
|
# self._llm_no_rag = ChatAnthropic(api_key=api_key,
|
||||||
|
# model=llm_model_ext,
|
||||||
|
# temperature=self._variables['RAG_temperature'])
|
||||||
|
# self._variables['tool_calling_supported'] = True
|
||||||
|
# else:
|
||||||
|
# raise ValueError(f"Invalid chat provider: {self._variables['llm_provider']}")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def transcription_client(self):
|
||||||
|
environment = os.getenv('FLASK_ENV', 'development')
|
||||||
|
portkey_metadata = self.get_portkey_metadata()
|
||||||
|
portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'),
|
||||||
|
metadata=portkey_metadata,
|
||||||
|
provider='openai',
|
||||||
|
trace_id=current_event.trace_id,
|
||||||
|
span_id=current_event.span_id,
|
||||||
|
span_name=current_event.span_name,
|
||||||
|
parent_span_id=current_event.parent_span_id
|
||||||
|
)
|
||||||
|
api_key = os.getenv('OPENAI_API_KEY')
|
||||||
|
self._transcription_client = OpenAI(api_key=api_key,
|
||||||
|
base_url=PORTKEY_GATEWAY_URL,
|
||||||
|
default_headers=portkey_headers)
|
||||||
|
self._variables['transcription_model'] = 'whisper-1'
|
||||||
|
return self._transcription_client
|
||||||
|
|
||||||
|
@property
|
||||||
|
def embedding_db_model(self):
|
||||||
|
if self._embedding_db_model is None:
|
||||||
|
self._embedding_db_model = self.get_embedding_db_model()
|
||||||
|
return self._embedding_db_model
|
||||||
|
|
||||||
|
def get_embedding_db_model(self):
|
||||||
|
current_app.logger.debug("In get_embedding_db_model")
|
||||||
|
if self._embedding_db_model is None:
|
||||||
|
self._embedding_db_model = EmbeddingSmallOpenAI \
|
||||||
|
if self._variables['embedding_model'] == 'text-embedding-3-small' \
|
||||||
|
else EmbeddingLargeOpenAI
|
||||||
|
current_app.logger.debug(f"Embedding DB Model: {self._embedding_db_model}")
|
||||||
|
return self._embedding_db_model
|
||||||
|
|
||||||
|
def get_prompt_template(self, template_name: str) -> str:
|
||||||
|
current_app.logger.info(f"Getting prompt template for {template_name}")
|
||||||
|
if template_name not in self._prompt_templates:
|
||||||
|
self._prompt_templates[template_name] = self._load_prompt_template(template_name)
|
||||||
|
return self._prompt_templates[template_name]
|
||||||
|
|
||||||
|
def _load_prompt_template(self, template_name: str) -> str:
|
||||||
|
# In the future, this method will make an API call to Portkey
|
||||||
|
# For now, we'll simulate it with a placeholder implementation
|
||||||
|
# You can replace this with your current prompt loading logic
|
||||||
|
return self._variables['templates'][template_name]
|
||||||
|
|
||||||
|
def __getitem__(self, key: str) -> Any:
|
||||||
|
current_app.logger.debug(f"ModelVariables: Getting {key}")
|
||||||
|
# Support older template names (suffix = _template)
|
||||||
|
if key.endswith('_template'):
|
||||||
|
key = key[:-len('_template')]
|
||||||
|
current_app.logger.debug(f"ModelVariables: Getting modified {key}")
|
||||||
|
if key == 'embedding_model':
|
||||||
|
return self.embedding_model
|
||||||
|
elif key == 'embedding_db_model':
|
||||||
|
return self.embedding_db_model
|
||||||
|
elif key == 'llm':
|
||||||
|
return self.llm
|
||||||
|
elif key == 'llm_no_rag':
|
||||||
|
return self.llm_no_rag
|
||||||
|
elif key == 'transcription_client':
|
||||||
|
return self.transcription_client
|
||||||
|
elif key in self._variables.get('prompt_templates', []):
|
||||||
|
return self.get_prompt_template(key)
|
||||||
|
return self._variables.get(key)
|
||||||
|
|
||||||
|
def __setitem__(self, key: str, value: Any) -> None:
|
||||||
|
self._variables[key] = value
|
||||||
|
|
||||||
|
def __delitem__(self, key: str) -> None:
|
||||||
|
del self._variables[key]
|
||||||
|
|
||||||
|
def __iter__(self) -> Iterator[str]:
|
||||||
|
return iter(self._variables)
|
||||||
|
|
||||||
|
def __len__(self):
|
||||||
|
return len(self._variables)
|
||||||
|
|
||||||
|
def get(self, key: str, default: Any = None) -> Any:
|
||||||
|
return self.__getitem__(key) or default
|
||||||
|
|
||||||
|
def update(self, **kwargs) -> None:
|
||||||
|
self._variables.update(kwargs)
|
||||||
|
|
||||||
|
def items(self):
|
||||||
|
return self._variables.items()
|
||||||
|
|
||||||
|
def keys(self):
|
||||||
|
return self._variables.keys()
|
||||||
|
|
||||||
|
def values(self):
|
||||||
|
return self._variables.values()
|
||||||
|
|
||||||
|
|
||||||
def select_model_variables(tenant):
|
def select_model_variables(tenant):
|
||||||
embedding_provider = tenant.embedding_model.rsplit('.', 1)[0]
|
model_variables = ModelVariables(tenant=tenant)
|
||||||
embedding_model = tenant.embedding_model.rsplit('.', 1)[1]
|
|
||||||
|
|
||||||
llm_provider = tenant.llm_model.rsplit('.', 1)[0]
|
|
||||||
llm_model = tenant.llm_model.rsplit('.', 1)[1]
|
|
||||||
|
|
||||||
# Set model variables
|
|
||||||
model_variables = {}
|
|
||||||
if tenant.es_k:
|
|
||||||
model_variables['k'] = tenant.es_k
|
|
||||||
else:
|
|
||||||
model_variables['k'] = 5
|
|
||||||
|
|
||||||
if tenant.es_similarity_threshold:
|
|
||||||
model_variables['similarity_threshold'] = tenant.es_similarity_threshold
|
|
||||||
else:
|
|
||||||
model_variables['similarity_threshold'] = 0.7
|
|
||||||
|
|
||||||
if tenant.chat_RAG_temperature:
|
|
||||||
model_variables['RAG_temperature'] = tenant.chat_RAG_temperature
|
|
||||||
else:
|
|
||||||
model_variables['RAG_temperature'] = 0.3
|
|
||||||
|
|
||||||
if tenant.chat_no_RAG_temperature:
|
|
||||||
model_variables['no_RAG_temperature'] = tenant.chat_no_RAG_temperature
|
|
||||||
else:
|
|
||||||
model_variables['no_RAG_temperature'] = 0.5
|
|
||||||
|
|
||||||
# Set Tuning variables
|
|
||||||
if tenant.embed_tuning:
|
|
||||||
model_variables['embed_tuning'] = tenant.embed_tuning
|
|
||||||
else:
|
|
||||||
model_variables['embed_tuning'] = False
|
|
||||||
|
|
||||||
if tenant.rag_tuning:
|
|
||||||
model_variables['rag_tuning'] = tenant.rag_tuning
|
|
||||||
else:
|
|
||||||
model_variables['rag_tuning'] = False
|
|
||||||
|
|
||||||
if tenant.rag_context:
|
|
||||||
model_variables['rag_context'] = tenant.rag_context
|
|
||||||
else:
|
|
||||||
model_variables['rag_context'] = " "
|
|
||||||
|
|
||||||
# Set HTML Chunking Variables
|
|
||||||
model_variables['html_tags'] = tenant.html_tags
|
|
||||||
model_variables['html_end_tags'] = tenant.html_end_tags
|
|
||||||
model_variables['html_included_elements'] = tenant.html_included_elements
|
|
||||||
model_variables['html_excluded_elements'] = tenant.html_excluded_elements
|
|
||||||
model_variables['html_excluded_classes'] = tenant.html_excluded_classes
|
|
||||||
|
|
||||||
# Set Chunk Size variables
|
|
||||||
model_variables['min_chunk_size'] = tenant.min_chunk_size
|
|
||||||
model_variables['max_chunk_size'] = tenant.max_chunk_size
|
|
||||||
|
|
||||||
environment = os.getenv('FLASK_ENV', 'development')
|
|
||||||
portkey_metadata = {'tenant_id': str(tenant.id), 'environment': environment}
|
|
||||||
|
|
||||||
# Set Embedding variables
|
|
||||||
match embedding_provider:
|
|
||||||
case 'openai':
|
|
||||||
portkey_headers = createHeaders(api_key=current_app.config.get('PORTKEY_API_KEY'),
|
|
||||||
provider='openai',
|
|
||||||
metadata=portkey_metadata)
|
|
||||||
match embedding_model:
|
|
||||||
case 'text-embedding-3-small':
|
|
||||||
api_key = current_app.config.get('OPENAI_API_KEY')
|
|
||||||
model_variables['embedding_model'] = OpenAIEmbeddings(api_key=api_key,
|
|
||||||
model='text-embedding-3-small',
|
|
||||||
base_url=PORTKEY_GATEWAY_URL,
|
|
||||||
default_headers=portkey_headers
|
|
||||||
)
|
|
||||||
model_variables['embedding_db_model'] = EmbeddingSmallOpenAI
|
|
||||||
case 'text-embedding-3-large':
|
|
||||||
api_key = current_app.config.get('OPENAI_API_KEY')
|
|
||||||
model_variables['embedding_model'] = OpenAIEmbeddings(api_key=api_key,
|
|
||||||
model='text-embedding-3-large',
|
|
||||||
base_url=PORTKEY_GATEWAY_URL,
|
|
||||||
default_headers=portkey_headers
|
|
||||||
)
|
|
||||||
model_variables['embedding_db_model'] = EmbeddingLargeOpenAI
|
|
||||||
case _:
|
|
||||||
raise Exception(f'Error setting model variables for tenant {tenant.id} '
|
|
||||||
f'error: Invalid embedding model')
|
|
||||||
case _:
|
|
||||||
raise Exception(f'Error setting model variables for tenant {tenant.id} '
|
|
||||||
f'error: Invalid embedding provider')
|
|
||||||
|
|
||||||
# Set Chat model variables
|
|
||||||
match llm_provider:
|
|
||||||
case 'openai':
|
|
||||||
portkey_headers = createHeaders(api_key=current_app.config.get('PORTKEY_API_KEY'),
|
|
||||||
metadata=portkey_metadata,
|
|
||||||
provider='openai')
|
|
||||||
tool_calling_supported = False
|
|
||||||
api_key = current_app.config.get('OPENAI_API_KEY')
|
|
||||||
model_variables['llm'] = ChatOpenAI(api_key=api_key,
|
|
||||||
model=llm_model,
|
|
||||||
temperature=model_variables['RAG_temperature'],
|
|
||||||
base_url=PORTKEY_GATEWAY_URL,
|
|
||||||
default_headers=portkey_headers)
|
|
||||||
model_variables['llm_no_rag'] = ChatOpenAI(api_key=api_key,
|
|
||||||
model=llm_model,
|
|
||||||
temperature=model_variables['no_RAG_temperature'],
|
|
||||||
base_url=PORTKEY_GATEWAY_URL,
|
|
||||||
default_headers=portkey_headers)
|
|
||||||
tool_calling_supported = False
|
|
||||||
match llm_model:
|
|
||||||
case 'gpt-4o' | 'gpt-4o-mini':
|
|
||||||
tool_calling_supported = True
|
|
||||||
processing_chunk_size = 10000
|
|
||||||
processing_chunk_overlap = 200
|
|
||||||
processing_min_chunk_size = 8000
|
|
||||||
processing_max_chunk_size = 12000
|
|
||||||
case _:
|
|
||||||
raise Exception(f'Error setting model variables for tenant {tenant.id} '
|
|
||||||
f'error: Invalid chat model')
|
|
||||||
case 'anthropic':
|
|
||||||
api_key = current_app.config.get('ANTHROPIC_API_KEY')
|
|
||||||
# Anthropic does not have the same 'generic' model names as OpenAI
|
|
||||||
llm_model_ext = current_app.config.get('ANTHROPIC_LLM_VERSIONS').get(llm_model)
|
|
||||||
model_variables['llm'] = ChatAnthropic(api_key=api_key,
|
|
||||||
model=llm_model_ext,
|
|
||||||
temperature=model_variables['RAG_temperature'])
|
|
||||||
model_variables['llm_no_rag'] = ChatAnthropic(api_key=api_key,
|
|
||||||
model=llm_model_ext,
|
|
||||||
temperature=model_variables['RAG_temperature'])
|
|
||||||
tool_calling_supported = True
|
|
||||||
processing_chunk_size = 10000
|
|
||||||
processing_chunk_overlap = 200
|
|
||||||
processing_min_chunk_size = 8000
|
|
||||||
processing_max_chunk_size = 12000
|
|
||||||
case _:
|
|
||||||
raise Exception(f'Error setting model variables for tenant {tenant.id} '
|
|
||||||
f'error: Invalid chat provider')
|
|
||||||
|
|
||||||
model_variables['processing_chunk_size'] = processing_chunk_size
|
|
||||||
model_variables['processing_chunk_overlap'] = processing_chunk_overlap
|
|
||||||
model_variables['processing_min_chunk_size'] = processing_min_chunk_size
|
|
||||||
model_variables['processing_max_chunk_size'] = processing_max_chunk_size
|
|
||||||
|
|
||||||
if tool_calling_supported:
|
|
||||||
model_variables['cited_answer_cls'] = CitedAnswer
|
|
||||||
|
|
||||||
templates = current_app.config['PROMPT_TEMPLATES'][f'{llm_provider}.{llm_model}']
|
|
||||||
model_variables['summary_template'] = templates['summary']
|
|
||||||
model_variables['rag_template'] = templates['rag']
|
|
||||||
model_variables['history_template'] = templates['history']
|
|
||||||
model_variables['encyclopedia_template'] = templates['encyclopedia']
|
|
||||||
model_variables['transcript_template'] = templates['transcript']
|
|
||||||
model_variables['html_parse_template'] = templates['html_parse']
|
|
||||||
model_variables['pdf_parse_template'] = templates['pdf_parse']
|
|
||||||
|
|
||||||
model_variables['annotation_chunk_length'] = current_app.config['ANNOTATION_TEXT_CHUNK_LENGTH'][tenant.llm_model]
|
|
||||||
|
|
||||||
# Transcription Client Variables.
|
|
||||||
# Using Groq
|
|
||||||
# api_key = current_app.config.get('GROQ_API_KEY')
|
|
||||||
# model_variables['transcription_client'] = Groq(api_key=api_key)
|
|
||||||
# model_variables['transcription_model'] = 'whisper-large-v3'
|
|
||||||
|
|
||||||
# Using OpenAI for transcriptions
|
|
||||||
portkey_metadata = {'tenant_id': str(tenant.id)}
|
|
||||||
portkey_headers = createHeaders(api_key=current_app.config.get('PORTKEY_API_KEY'),
|
|
||||||
metadata=portkey_metadata,
|
|
||||||
provider='openai'
|
|
||||||
)
|
|
||||||
api_key = current_app.config.get('OPENAI_API_KEY')
|
|
||||||
model_variables['transcription_client'] = OpenAI(api_key=api_key,
|
|
||||||
base_url=PORTKEY_GATEWAY_URL,
|
|
||||||
default_headers=portkey_headers)
|
|
||||||
model_variables['transcription_model'] = 'whisper-1'
|
|
||||||
|
|
||||||
return model_variables
|
return model_variables
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
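`ModelVariables` keeps dict-style access working for existing call sites while deferring client construction until a key is first read. A sketch of how a caller sees it, assuming a loaded `tenant` row and an active business event (the lazy properties read `current_event` when building Portkey headers):

```python
from common.utils.model_utils import select_model_variables

model_variables = select_model_variables(tenant)

# Plain values come straight from the tenant-backed dict...
k = model_variables['k']                         # e.g. 5

# ...while these keys are intercepted by __getitem__ and built lazily,
# with Portkey trace headers derived from `current_event` at access time.
llm = model_variables['llm']                     # ChatOpenAI behind the Portkey gateway
embeddings = model_variables['embedding_model']  # OpenAIEmbeddings

# Legacy '<name>_template' keys are still honoured by stripping the suffix.
rag_prompt = model_variables['rag_template']
```

This is why the old `select_model_variables` body collapses to one line: the per-provider `match` logic now lives in the lazy properties and in `config/model_config.py`.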
common/utils/portkey_utils.py (new file, 99 lines)
@@ -0,0 +1,99 @@
+import requests
+import json
+from typing import Optional
+
+
+# Define a function to make the GET request
+def get_metadata_grouped_data(
+        api_key: str,
+        metadata_key: str,
+        time_of_generation_min: Optional[str] = None,
+        time_of_generation_max: Optional[str] = None,
+        total_units_min: Optional[int] = None,
+        total_units_max: Optional[int] = None,
+        cost_min: Optional[float] = None,
+        cost_max: Optional[float] = None,
+        prompt_token_min: Optional[int] = None,
+        prompt_token_max: Optional[int] = None,
+        completion_token_min: Optional[int] = None,
+        completion_token_max: Optional[int] = None,
+        status_code: Optional[str] = None,
+        weighted_feedback_min: Optional[float] = None,
+        weighted_feedback_max: Optional[float] = None,
+        virtual_keys: Optional[str] = None,
+        configs: Optional[str] = None,
+        workspace_slug: Optional[str] = None,
+        api_key_ids: Optional[str] = None,
+        current_page: Optional[int] = 1,
+        page_size: Optional[int] = 20,
+        metadata: Optional[str] = None,
+        ai_org_model: Optional[str] = None,
+        trace_id: Optional[str] = None,
+        span_id: Optional[str] = None,
+):
+    url = f"https://api.portkey.ai/v1/analytics/groups/metadata/{metadata_key}"
+
+    # Set up query parameters
+    params = {
+        "time_of_generation_min": time_of_generation_min,
+        "time_of_generation_max": time_of_generation_max,
+        "total_units_min": total_units_min,
+        "total_units_max": total_units_max,
+        "cost_min": cost_min,
+        "cost_max": cost_max,
+        "prompt_token_min": prompt_token_min,
+        "prompt_token_max": prompt_token_max,
+        "completion_token_min": completion_token_min,
+        "completion_token_max": completion_token_max,
+        "status_code": status_code,
+        "weighted_feedback_min": weighted_feedback_min,
+        "weighted_feedback_max": weighted_feedback_max,
+        "virtual_keys": virtual_keys,
+        "configs": configs,
+        "workspace_slug": workspace_slug,
+        "api_key_ids": api_key_ids,
+        "current_page": current_page,
+        "page_size": page_size,
+        "metadata": metadata,
+        "ai_org_model": ai_org_model,
+        "trace_id": trace_id,
+        "span_id": span_id,
+    }
+
+    # Remove any keys with None values
+    params = {k: v for k, v in params.items() if v is not None}
+
+    # Set up the headers
+    headers = {
+        "Authorization": f"Bearer {api_key}",
+        "Content-Type": "application/json"
+    }
+
+    # Make the GET request
+    response = requests.get(url, headers=headers, params=params)
+
+    # Check for successful response
+    if response.status_code == 200:
+        return response.json()  # Return JSON data
+    else:
+        response.raise_for_status()  # Raise an exception for errors
+
+
+# Example usage
+# Replace 'your_api_key' and 'your_metadata_key' with actual values
+api_key = 'your_api_key'
+metadata_key = 'your_metadata_key'
+
+try:
+    data = get_metadata_grouped_data(
+        api_key=api_key,
+        metadata_key=metadata_key,
+        time_of_generation_min="2024-08-23T15:50:23+05:30",
+        time_of_generation_max="2024-09-23T15:50:23+05:30",
+        total_units_min=100,
+        total_units_max=1000,
+        cost_min=10,
+        cost_max=100,
+        status_code="200,201"
+    )
+    print(json.dumps(data, indent=4))
+except Exception as e:
+    print(f"Error occurred: {str(e)}")
@@ -1,4 +1,4 @@
-from flask import flash
+from flask import flash, current_app


 def prepare_table(model_objects, column_names):
@@ -44,7 +44,8 @@ def form_validation_failed(request, form):
     for fieldName, errorMessages in form.errors.items():
         for err in errorMessages:
             flash(f"Error in {fieldName}: {err}", 'danger')
+            current_app.logger.debug(f"Error in {fieldName}: {err}", 'danger')


 def form_to_dict(form):
     return {field.name: field.data for field in form if field.name != 'csrf_token' and hasattr(field, 'data')}
@@ -137,6 +137,12 @@ class Config(object):
     MAIL_PASSWORD = environ.get('MAIL_PASSWORD')
     MAIL_DEFAULT_SENDER = ('eveAI Admin', MAIL_USERNAME)

+    # Langsmith settings
+    LANGCHAIN_TRACING_V2 = True
+    LANGCHAIN_ENDPOINT = 'https://api.smith.langchain.com'
+    LANGCHAIN_PROJECT = "eveai"
+
     SUPPORTED_FILE_TYPES = ['pdf', 'html', 'md', 'txt', 'mp3', 'mp4', 'ogg', 'srt']

     TENANT_TYPES = ['Active', 'Demo', 'Inactive', 'Test']
@@ -12,7 +12,12 @@ env = os.environ.get('FLASK_ENV', 'development')
 class CustomLogRecord(logging.LogRecord):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.component = os.environ.get('COMPONENT_NAME', 'eveai_app')  # Set default component value here
+        self.component = os.environ.get('COMPONENT_NAME', 'eveai_app')
+
+    def __setattr__(self, name, value):
+        if name not in {'event_type', 'tenant_id', 'trace_id', 'span_id', 'span_name', 'parent_span_id',
+                        'document_version_id', 'chat_session_id', 'interaction_id', 'environment'}:
+            super().__setattr__(name, value)


 def custom_log_record_factory(*args, **kwargs):
@@ -108,6 +113,14 @@ LOGGING = {
         'backupCount': 10,
         'formatter': 'standard',
     },
+    'file_business_events': {
+        'level': 'INFO',
+        'class': 'logging.handlers.RotatingFileHandler',
+        'filename': 'logs/business_events.log',
+        'maxBytes': 1024 * 1024 * 5,  # 5MB
+        'backupCount': 10,
+        'formatter': 'standard',
+    },
     'console': {
         'class': 'logging.StreamHandler',
         'level': 'DEBUG',
@@ -184,6 +197,11 @@ LOGGING = {
         'level': 'DEBUG',
         'propagate': False
     },
+    'business_events': {
+        'handlers': ['file_business_events', 'graylog'],
+        'level': 'DEBUG',
+        'propagate': False
+    },
     '': {  # root logger
         'handlers': ['console'],
         'level': 'WARNING',  # Set higher level for root to minimize noise
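For reference, a record that exercises the new `business_events` logger looks like the sketch below; `BusinessEvent.log` does the equivalent internally by passing its trace fields via `extra`, and the `CustomLogRecord.__setattr__` guard keeps those reserved field names from being clobbered by ordinary attribute assignment on the record. All field values here are made up:

```python
import logging

logger = logging.getLogger('business_events')

# The `extra` mapping lands on the LogRecord, so the file handler and the
# Graylog handler can emit the trace fields alongside the message.
logger.info('Starting span chunking', extra={
    'event_type': 'document_embedding',   # hypothetical values
    'tenant_id': 42,
    'trace_id': 'b5f9e9c0-...',
    'span_id': '11ab23cd-...',
    'span_name': 'chunking',
    'parent_span_id': None,
    'environment': 'development',
})
```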
config/model_config.py (new file, 41 lines)
@@ -0,0 +1,41 @@
+MODEL_CONFIG = {
+    "openai": {
+        "gpt-4o": {
+            "tool_calling_supported": True,
+            "processing_chunk_size": 10000,
+            "processing_chunk_overlap": 200,
+            "processing_min_chunk_size": 8000,
+            "processing_max_chunk_size": 12000,
+            "prompt_templates": [
+                "summary", "rag", "history", "encyclopedia",
+                "transcript", "html_parse", "pdf_parse"
+            ]
+        },
+        "gpt-4o-mini": {
+            "tool_calling_supported": True,
+            "processing_chunk_size": 10000,
+            "processing_chunk_overlap": 200,
+            "processing_min_chunk_size": 8000,
+            "processing_max_chunk_size": 12000,
+            "prompt_templates": [
+                "summary", "rag", "history", "encyclopedia",
+                "transcript", "html_parse", "pdf_parse"
+            ]
+        },
+        # Add other OpenAI models here
+    },
+    "anthropic": {
+        "claude-3-5-sonnet": {
+            "tool_calling_supported": True,
+            "processing_chunk_size": 10000,
+            "processing_chunk_overlap": 200,
+            "processing_min_chunk_size": 8000,
+            "processing_max_chunk_size": 12000,
+            "prompt_templates": [
+                "summary", "rag", "history", "encyclopedia",
+                "transcript", "html_parse", "pdf_parse"
+            ]
+        },
+        # Add other Anthropic models here
+    },
+}
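`ModelVariables._initialize_variables` consumes this table with a two-level `.get(..., {})`, so an unknown provider/model pair degrades to an empty dict rather than a `KeyError`; note that the later `variables['tool_calling_supported']` read does assume the model is listed here. The lookup in isolation:

```python
from config.model_config import MODEL_CONFIG

provider, model = 'openai', 'gpt-4o'
model_config = MODEL_CONFIG.get(provider, {}).get(model, {})
print(model_config['processing_chunk_size'])   # 10000

# Unknown combinations fall back to {} instead of raising.
print(MODEL_CONFIG.get('openai', {}).get('gpt-3.5-turbo', {}))  # {}
```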
@@ -141,7 +141,7 @@ if [ $# -eq 0 ]; then
     SERVICES=()
     while IFS= read -r line; do
         SERVICES+=("$line")
-    done < <(yq e '.services | keys | .[]' compose_dev.yaml | grep -E '^(nginx|eveai_)')
+    done < <(yq e '.services | keys | .[]' compose_dev.yaml | grep -E '^(nginx|eveai_|flower)')
 else
     SERVICES=("$@")
 fi
@@ -158,7 +158,7 @@ docker buildx use eveai_builder

 # Loop through services
 for SERVICE in "${SERVICES[@]}"; do
-    if [[ "$SERVICE" == "nginx" || "$SERVICE" == eveai_* ]]; then
+    if [[ "$SERVICE" == "nginx" || "$SERVICE" == eveai_* || "$SERVICE" == "flower" ]]; then
         if process_service "$SERVICE"; then
             echo "Successfully processed $SERVICE"
         else
@@ -22,6 +22,8 @@ x-common-variables: &common-variables
   MAIL_PASSWORD: '$$6xsWGbNtx$$CFMQZqc*'
   MAIL_SERVER: mail.flow-it.net
   MAIL_PORT: 465
+  REDIS_URL: redis
+  REDIS_PORT: '6379'
   OPENAI_API_KEY: 'sk-proj-8R0jWzwjL7PeoPyMhJTZT3BlbkFJLb6HfRB2Hr9cEVFWEhU7'
   GROQ_API_KEY: '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'
   ANTHROPIC_API_KEY: 'sk-ant-api03-c2TmkzbReeGhXBO5JxNH6BJNylRDonc9GmZd0eRbrvyekec2'
@@ -32,6 +34,7 @@ x-common-variables: &common-variables
   MINIO_ACCESS_KEY: minioadmin
   MINIO_SECRET_KEY: minioadmin
   NGINX_SERVER_NAME: 'localhost http://macstudio.ask-eve-ai-local.com/'
+  LANGCHAIN_API_KEY: "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"


 networks:
@@ -264,6 +267,22 @@ services:
     networks:
       - eveai-network

+  flower:
+    image: josakola/flower:latest
+    build:
+      context: ..
+      dockerfile: ./docker/flower/Dockerfile
+    environment:
+      <<: *common-variables
+    volumes:
+      - ../scripts:/app/scripts
+    ports:
+      - "5555:5555"
+    depends_on:
+      - redis
+    networks:
+      - eveai-network
+
   minio:
     image: minio/minio
     ports:
@@ -21,11 +21,13 @@ x-common-variables: &common-variables
   MAIL_USERNAME: 'evie_admin@askeveai.com'
   MAIL_PASSWORD: 's5D%R#y^v!s&6Z^i0k&'
   MAIL_SERVER: mail.askeveai.com
-  MAIL_PORT: 465
+  MAIL_PORT: '465'
   REDIS_USER: eveai
   REDIS_PASS: 'jHliZwGD36sONgbm0fc6SOpzLbknqq4RNF8K'
   REDIS_URL: 8bciqc.stackhero-network.com
   REDIS_PORT: '9961'
+  FLOWER_USER: 'Felucia'
+  FLOWER_PASSWORD: 'Jungles'
   OPENAI_API_KEY: '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'
   GROQ_API_KEY: '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'
   ANTHROPIC_API_KEY: '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'
@@ -38,6 +40,7 @@ x-common-variables: &common-variables
   MINIO_ACCESS_KEY: 04JKmQln8PQpyTmMiCPc
   MINIO_SECRET_KEY: 2PEZAD1nlpAmOyDV0TUTuJTQw1qVuYLF3A7GMs0D
   NGINX_SERVER_NAME: 'evie.askeveai.com mxz536.stackhero-network.com'
+  LANGCHAIN_API_KEY: "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"

 networks:
   eveai-network:
@@ -53,10 +56,6 @@ services:
     environment:
       <<: *common-variables
     volumes:
-      # - ../nginx:/etc/nginx
-      # - ../nginx/sites-enabled:/etc/nginx/sites-enabled
-      # - ../nginx/static:/etc/nginx/static
-      # - ../nginx/public:/etc/nginx/public
       - eveai_logs:/var/log/nginx
     labels:
       - "traefik.enable=true"
@@ -81,7 +80,7 @@ services:
     volumes:
       - eveai_logs:/app/logs
     healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:5001/health"]
+      test: ["CMD", "curl", "-f", "http://localhost:5001/healthz/ready"]
       interval: 10s
       timeout: 5s
       retries: 5
@@ -91,18 +90,11 @@ services:
   eveai_workers:
     platform: linux/amd64
     image: josakola/eveai_workers:latest
-    # ports:
-    #   - 5001:5001
     environment:
       <<: *common-variables
       COMPONENT_NAME: eveai_workers
     volumes:
       - eveai_logs:/app/logs
-    # healthcheck:
-    #   test: [ "CMD", "curl", "-f", "http://localhost:5001/health" ]
-    #   interval: 10s
-    #   timeout: 5s
-    #   retries: 5
     networks:
       - eveai-network

@@ -117,7 +109,7 @@ services:
     volumes:
       - eveai_logs:/app/logs
     healthcheck:
-      test: [ "CMD", "curl", "-f", "http://localhost:5002/health" ]  # Adjust based on your health endpoint
+      test: [ "CMD", "curl", "-f", "http://localhost:5002/healthz/ready" ]  # Adjust based on your health endpoint
       interval: 10s
       timeout: 5s
       retries: 5
@@ -127,18 +119,11 @@ services:
   eveai_chat_workers:
     platform: linux/amd64
     image: josakola/eveai_chat_workers:latest
-    # ports:
-    #   - 5001:5001
     environment:
       <<: *common-variables
       COMPONENT_NAME: eveai_chat_workers
     volumes:
       - eveai_logs:/app/logs
-    # healthcheck:
-    #   test: [ "CMD", "curl", "-f", "http://localhost:5001/health" ]
-    #   interval: 10s
-    #   timeout: 5s
-    #   retries: 5
     networks:
       - eveai-network

@@ -153,20 +138,23 @@ services:
     volumes:
       - eveai_logs:/app/logs
     healthcheck:
-      test: [ "CMD", "curl", "-f", "http://localhost:5001/health" ]
+      test: [ "CMD", "curl", "-f", "http://localhost:5003/healthz/ready" ]
       interval: 10s
       timeout: 5s
       retries: 5
     networks:
       - eveai-network

+  flower:
+    image: josakola/flower:latest
+    environment:
+      <<: *common-variables
+    ports:
+      - "5555:5555"
+    networks:
+      - eveai-network
+
 volumes:
   eveai_logs:
-  # minio_data:
-  # db-data:
-  # redis-data:
-  # tenant-files:
-  #secrets:
-  #  db-password:
-  #    file: ./db/password.txt
|
|||||||
build-essential \
|
build-essential \
|
||||||
gcc \
|
gcc \
|
||||||
postgresql-client \
|
postgresql-client \
|
||||||
|
curl \
|
||||||
&& apt-get clean \
|
&& apt-get clean \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
|||||||
@@ -34,6 +34,7 @@ RUN apt-get update && apt-get install -y \
|
|||||||
build-essential \
|
build-essential \
|
||||||
gcc \
|
gcc \
|
||||||
postgresql-client \
|
postgresql-client \
|
||||||
|
curl \
|
||||||
&& apt-get clean \
|
&& apt-get clean \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
|||||||
@@ -34,6 +34,7 @@ RUN apt-get update && apt-get install -y \
|
|||||||
build-essential \
|
build-essential \
|
||||||
gcc \
|
gcc \
|
||||||
postgresql-client \
|
postgresql-client \
|
||||||
|
curl \
|
||||||
&& apt-get clean \
|
&& apt-get clean \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
|||||||
docker/flower/Dockerfile (new file, 34 lines)
@@ -0,0 +1,34 @@
|
|||||||
|
ARG PYTHON_VERSION=3.12.3
|
||||||
|
FROM python:${PYTHON_VERSION}-slim as base
|
||||||
|
|
||||||
|
ENV PYTHONDONTWRITEBYTECODE=1
|
||||||
|
ENV PYTHONUNBUFFERED=1
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
ARG UID=10001
|
||||||
|
RUN adduser \
|
||||||
|
--disabled-password \
|
||||||
|
--gecos "" \
|
||||||
|
--home "/nonexistent" \
|
||||||
|
--shell "/bin/bash" \
|
||||||
|
--no-create-home \
|
||||||
|
--uid "${UID}" \
|
||||||
|
appuser
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install -y \
|
||||||
|
build-essential \
|
||||||
|
gcc \
|
||||||
|
&& apt-get clean \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
COPY requirements.txt /app/
|
||||||
|
RUN pip install --no-cache-dir -r requirements.txt
|
||||||
|
|
||||||
|
COPY . /app
|
||||||
|
COPY scripts/start_flower.sh /app/start_flower.sh
|
||||||
|
RUN chmod a+x /app/start_flower.sh
|
||||||
|
|
||||||
|
USER appuser
|
||||||
|
|
||||||
|
CMD ["/app/start_flower.sh"]
|
||||||
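
The referenced scripts/start_flower.sh is not part of this diff. As a rough, hypothetical sketch of what such an entrypoint does, a Python equivalent might simply exec the Flower monitor against the configured broker (the broker URL default below is a placeholder, not the project's actual value):

    # Hypothetical stand-in for start_flower.sh; the real script is not shown in this diff.
    import os
    import subprocess

    broker = os.environ.get("CELERY_BROKER_URL", "redis://redis:6379/0")  # placeholder default
    subprocess.run(["celery", "--broker", broker, "flower", "--port=5555"], check=True)

The port matches the 5555:5555 mapping added to docker-compose above.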
@@ -76,20 +76,24 @@ def create_app(config_file=None):
             app.logger.debug('Token request detected, skipping JWT verification')
             return

-        try:
-            verify_jwt_in_request(optional=True)
-            tenant_id = get_jwt_identity()
-            app.logger.debug(f'Tenant ID from JWT: {tenant_id}')
-            if tenant_id:
-                Database(tenant_id).switch_schema()
-                app.logger.debug(f'Switched to schema for tenant {tenant_id}')
-            else:
-                app.logger.debug('No tenant ID found in JWT')
-        except Exception as e:
-            app.logger.error(f'Error in before_request: {str(e)}')
-            # Don't raise the exception here, let the request continue
-            # The appropriate error handling will be done in the specific endpoints
+        # Check if this is a health check request
+        if request.path.startswith('/_healthz') or request.path.startswith('/healthz'):
+            app.logger.debug('Health check request detected, skipping JWT verification')
+        else:
+            try:
+                verify_jwt_in_request(optional=True)
+                tenant_id = get_jwt_identity()
+                app.logger.debug(f'Tenant ID from JWT: {tenant_id}')
+                if tenant_id:
+                    Database(tenant_id).switch_schema()
+                    app.logger.debug(f'Switched to schema for tenant {tenant_id}')
+                else:
+                    app.logger.debug('No tenant ID found in JWT')
+            except Exception as e:
+                app.logger.error(f'Error in before_request: {str(e)}')
+                # Don't raise the exception here, let the request continue
+                # The appropriate error handling will be done in the specific endpoints

     return app

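
The rationale for this change: the container healthchecks added above curl the /healthz endpoints without an Authorization header, so the before_request hook has to let those paths through without JWT verification. Condensed to its essence (the constant below is introduced only for illustration):

    # Condensed sketch of the guard; names other than request.path are illustrative.
    from flask import request

    HEALTH_PREFIXES = ('/_healthz', '/healthz')  # assumed constant, for illustration

    def is_health_request() -> bool:
        # Health probes carry no JWT, so they must bypass verification
        return request.path.startswith(HEALTH_PREFIXES)

Note that str.startswith accepts a tuple, so both prefixes can be tested in one call.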
@@ -24,7 +24,7 @@ def liveness():
 def readiness():
     checks = {
         "database": check_database(),
-        "celery": check_celery(),
+        # "celery": check_celery(),
         "minio": check_minio(),
         # Add more checks as needed
     }

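
With the Celery check commented out, readiness now depends only on the database and MinIO checks. The hunk does not show how the checks dict becomes an HTTP response; a minimal sketch, assuming each check function returns a truthy value when healthy, would be:

    # Minimal sketch; the actual response shape of the readiness view is not shown in this diff.
    from flask import jsonify

    def readiness_response(checks: dict):
        healthy = all(checks.values())
        status = 'ready' if healthy else 'not ready'
        return jsonify({'status': status, 'checks': checks}), 200 if healthy else 503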
@@ -10,6 +10,8 @@ from common.extensions import (db, migrate, bootstrap, security, mail, login_man
                                minio_client, simple_encryption, metrics)
 from common.models.user import User, Role, Tenant, TenantDomain
 import common.models.interaction
+import common.models.monitoring
+import common.models.document
 from common.utils.nginx_utils import prefixed_url_for
 from config.logging_config import LOGGING
 from common.utils.security import set_tenant_session_data

@@ -48,7 +48,7 @@ def check_database():
 def check_celery():
     try:
         # Send a simple task to Celery
-        result = current_celery.send_task('tasks.ping', queue='embeddings')
+        result = current_celery.send_task('ping', queue='embeddings')
         response = result.get(timeout=10)  # Wait for up to 10 seconds for a response
         return response == 'pong'
     except CeleryTimeoutError:

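
This check only works because a worker registers a task under the bare name 'ping' on the same queue; a later hunk in this diff shows exactly that decorator, so the worker-side counterpart is essentially:

    # Worker-side counterpart that check_celery round-trips against (see the
    # @current_celery.task(name='ping', queue='embeddings') hunk further down).
    @current_celery.task(name='ping', queue='embeddings')
    def ping():
        return 'pong'  # check_celery compares the fetched result against 'pong'

The rename from 'tasks.ping' to 'ping' matters because send_task resolves tasks by their registered name string, not by module path.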
@@ -67,7 +67,7 @@ class TenantForm(FlaskForm):
         # Initialize fallback algorithms
         self.fallback_algorithms.choices = \
             [(algorithm, algorithm.lower()) for algorithm in current_app.config['FALLBACK_ALGORITHMS']]
-        self.type.choices = [('', 'Select Type')] + [(t, t) for t in current_app.config['TENANT_TYPES']]
+        self.type.choices = [(t, t) for t in current_app.config['TENANT_TYPES']]


 class BaseUserForm(FlaskForm):

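
Dropping the ('', 'Select Type') placeholder means the select now only ever submits a real tenant type. For illustration only (the project's actual TENANT_TYPES list is not shown in this diff), a config such as:

    # Illustrative values; the real TENANT_TYPES config is not part of this diff.
    TENANT_TYPES = ['active', 'inactive', 'demo']

would render a three-entry dropdown with no empty default option.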
@@ -129,6 +129,7 @@ def edit_tenant(tenant_id):
         form.html_excluded_classes.data = ', '.join(tenant.html_excluded_classes)

     if form.validate_on_submit():
+        current_app.logger.debug(f'Updating tenant {tenant_id}')
         # Populate the tenant with form data
         form.populate_obj(tenant)
         # Then handle the special fields manually

@@ -148,6 +149,7 @@ def edit_tenant(tenant_id):
         session['tenant'] = tenant.to_dict()
         # return redirect(url_for(f"user/tenant/tenant_id"))
     else:
+        current_app.logger.debug(f'Tenant update failed with errors: {form.errors}')
         form_validation_failed(request, form)

     return render_template('user/edit_tenant.html', form=form, tenant_id=tenant_id)

@@ -60,7 +60,6 @@ def register_extensions(app):
     session.init_app(app)
-


 def register_blueprints(app):
     from views.healthz_views import healthz_bp
     app.register_blueprint(healthz_bp)

@@ -41,7 +41,7 @@ def check_database():
 def check_celery():
     try:
         # Send a simple task to Celery
-        result = current_celery.send_task('tasks.ping', queue='llm_interactions')
+        result = current_celery.send_task('ping', queue='llm_interactions')
         response = result.get(timeout=10)  # Wait for up to 10 seconds for a response
         return response == 'pong'
     except CeleryTimeoutError:

@@ -22,8 +22,10 @@ from common.models.interaction import ChatSession, Interaction, InteractionEmbed
 from common.extensions import db
 from common.utils.celery_utils import current_celery
 from common.utils.model_utils import select_model_variables, create_language_template, replace_variable_in_template
-from common.langchain.EveAIRetriever import EveAIRetriever
-from common.langchain.EveAIHistoryRetriever import EveAIHistoryRetriever
+from common.langchain.eveai_retriever import EveAIRetriever
+from common.langchain.eveai_history_retriever import EveAIHistoryRetriever
+from common.utils.business_event import BusinessEvent
+from common.utils.business_event_context import current_event


 # Healthcheck task

@@ -33,7 +35,10 @@ def ping():


 def detail_question(question, language, model_variables, session_id):
-    retriever = EveAIHistoryRetriever(model_variables, session_id)
+    current_app.logger.debug(f'Detail question: {question}')
+    current_app.logger.debug(f'model_variables: {model_variables}')
+    current_app.logger.debug(f'session_id: {session_id}')
+    retriever = EveAIHistoryRetriever(model_variables=model_variables, session_id=session_id)
     llm = model_variables['llm']
     template = model_variables['history_template']
     language_template = create_language_template(template, language)

@@ -62,53 +67,56 @@ def ask_question(tenant_id, question, language, session_id, user_timezone, room):
         'interaction_id': 'interaction_id_value'
     }
     """
+    with BusinessEvent("Ask Question", tenant_id=tenant_id, chat_session_id=session_id):
         current_app.logger.info(f'ask_question: Received question for tenant {tenant_id}: {question}. Processing...')

         try:
             # Retrieve the tenant
             tenant = Tenant.query.get(tenant_id)
             if not tenant:
                 raise Exception(f'Tenant {tenant_id} not found.')

             # Ensure we are working in the correct database schema
             Database(tenant_id).switch_schema()

             # Ensure we have a session to store history
             chat_session = ChatSession.query.filter_by(session_id=session_id).first()
             if not chat_session:
                 try:
                     chat_session = ChatSession()
                     chat_session.session_id = session_id
                     chat_session.session_start = dt.now(tz.utc)
                     chat_session.timezone = user_timezone
                     db.session.add(chat_session)
                     db.session.commit()
                 except SQLAlchemyError as e:
                     current_app.logger.error(f'ask_question: Error initializing chat session in database: {e}')
                     raise

             if tenant.rag_tuning:
                 current_app.rag_tuning_logger.debug(f'Received question for tenant {tenant_id}:\n{question}. Processing...')
                 current_app.rag_tuning_logger.debug(f'Tenant Information: \n{tenant.to_dict()}')
                 current_app.rag_tuning_logger.debug(f'===================================================================')
                 current_app.rag_tuning_logger.debug(f'===================================================================')

+            with current_event.create_span("RAG Answer"):
                 result, interaction = answer_using_tenant_rag(question, language, tenant, chat_session)
                 result['algorithm'] = current_app.config['INTERACTION_ALGORITHMS']['RAG_TENANT']['name']
             result['interaction_id'] = interaction.id
             result['room'] = room  # Include the room in the result

             if result['insufficient_info']:
                 if 'LLM' in tenant.fallback_algorithms:
+                    with current_event.create_span("Fallback Algorithm LLM"):
                         result, interaction = answer_using_llm(question, language, tenant, chat_session)
                         result['algorithm'] = current_app.config['INTERACTION_ALGORITHMS']['LLM']['name']
                         result['interaction_id'] = interaction.id
                         result['room'] = room  # Include the room in the result

             return result
         except Exception as e:
             current_app.logger.error(f'ask_question: Error processing question: {e}')
             raise

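
BusinessEvent and current_event come from common.utils.business_event and common.utils.business_event_context, which are not included in this diff. Purely as an assumption about their shape, the usage above (a with block that makes the event reachable as current_event, plus create_span and log) is consistent with a contextvars-based implementation along these lines:

    # Speculative sketch of the BusinessEvent machinery; the real implementation is not in this diff.
    import contextvars
    from contextlib import contextmanager
    from datetime import datetime, timezone

    _current_event = contextvars.ContextVar('current_event')

    class BusinessEvent:
        def __init__(self, name, tenant_id=None, **attributes):
            self.name = name
            self.tenant_id = tenant_id
            self.attributes = attributes
            self.entries = []

        def log(self, message):
            # Timestamped breadcrumb, as used by current_event.log(...) elsewhere in this diff
            self.entries.append((datetime.now(timezone.utc), message))

        @contextmanager
        def create_span(self, span_name):
            # Named sub-timing nested inside the event
            start = datetime.now(timezone.utc)
            try:
                yield
            finally:
                self.log(f'span "{span_name}" took {datetime.now(timezone.utc) - start}')

        def __enter__(self):
            self._token = _current_event.set(self)
            return self

        def __exit__(self, exc_type, exc, tb):
            _current_event.reset(self._token)
            return False

    class _CurrentEventProxy:
        # Lets module-level code call current_event.create_span(...) without passing the event around
        def __getattr__(self, item):
            return getattr(_current_event.get(), item)

    current_event = _CurrentEventProxy()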
@@ -128,92 +136,94 @@ def answer_using_tenant_rag(question, language, tenant, chat_session):
     # Langchain debugging if required
     # set_debug(True)

+    with current_event.create_span("Detail Question"):
         detailed_question = detail_question(question, language, model_variables, chat_session.session_id)
         current_app.logger.debug(f'Original question:\n {question}\n\nDetailed question: {detailed_question}')
         if tenant.rag_tuning:
             current_app.rag_tuning_logger.debug(f'Detailed Question for tenant {tenant.id}:\n{question}.')
             current_app.rag_tuning_logger.debug(f'-------------------------------------------------------------------')
         new_interaction.detailed_question = detailed_question
         new_interaction.detailed_question_at = dt.now(tz.utc)

+    with current_event.create_span("Generate Answer using RAG"):
         retriever = EveAIRetriever(model_variables, tenant_info)
         llm = model_variables['llm']
         template = model_variables['rag_template']
         language_template = create_language_template(template, language)
         full_template = replace_variable_in_template(language_template, "{tenant_context}", model_variables['rag_context'])
         rag_prompt = ChatPromptTemplate.from_template(full_template)
         setup_and_retrieval = RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
         if tenant.rag_tuning:
             current_app.rag_tuning_logger.debug(f'Full prompt for tenant {tenant.id}:\n{full_template}.')
             current_app.rag_tuning_logger.debug(f'-------------------------------------------------------------------')

         new_interaction_embeddings = []
         if not model_variables['cited_answer_cls']:  # The model doesn't support structured feedback
             output_parser = StrOutputParser()

             chain = setup_and_retrieval | rag_prompt | llm | output_parser

             # Invoke the chain with the actual question
             answer = chain.invoke(detailed_question)
             new_interaction.answer = answer
             result = {
                 'answer': answer,
                 'citations': [],
                 'insufficient_info': False
             }

         else:  # The model supports structured feedback
             structured_llm = llm.with_structured_output(model_variables['cited_answer_cls'])

             chain = setup_and_retrieval | rag_prompt | structured_llm

             result = chain.invoke(detailed_question).dict()
             current_app.logger.debug(f'ask_question: result answer: {result['answer']}')
             current_app.logger.debug(f'ask_question: result citations: {result["citations"]}')
             current_app.logger.debug(f'ask_question: insufficient information: {result["insufficient_info"]}')
             if tenant.rag_tuning:
                 current_app.rag_tuning_logger.debug(f'ask_question: result answer: {result['answer']}')
                 current_app.rag_tuning_logger.debug(f'ask_question: result citations: {result["citations"]}')
                 current_app.rag_tuning_logger.debug(f'ask_question: insufficient information: {result["insufficient_info"]}')
                 current_app.rag_tuning_logger.debug(f'-------------------------------------------------------------------')
             new_interaction.answer = result['answer']

             # Filter out the existing Embedding IDs
             given_embedding_ids = [int(emb_id) for emb_id in result['citations']]
             embeddings = (
                 db.session.query(Embedding)
                 .filter(Embedding.id.in_(given_embedding_ids))
                 .all()
             )
             existing_embedding_ids = [emb.id for emb in embeddings]
             urls = list(set(emb.document_version.url for emb in embeddings))
             if tenant.rag_tuning:
                 current_app.rag_tuning_logger.debug(f'Referenced documents for answer for tenant {tenant.id}:\n')
                 current_app.rag_tuning_logger.debug(f'{urls}')
                 current_app.rag_tuning_logger.debug(f'-------------------------------------------------------------------')

             for emb_id in existing_embedding_ids:
                 new_interaction_embedding = InteractionEmbedding(embedding_id=emb_id)
                 new_interaction_embedding.interaction = new_interaction
                 new_interaction_embeddings.append(new_interaction_embedding)

             result['citations'] = urls

     # Disable langchain debugging if set above.
     # set_debug(False)

     new_interaction.answer_at = dt.now(tz.utc)
     chat_session.session_end = dt.now(tz.utc)

     try:
         db.session.add(chat_session)
         db.session.add(new_interaction)
         db.session.add_all(new_interaction_embeddings)
         db.session.commit()
         return result, new_interaction
     except SQLAlchemyError as e:
         current_app.logger.error(f'ask_question: Error saving interaction to database: {e}')
         raise

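
Under an implementation like the sketch above, spans nest naturally and the instrumented functions never pass the event around explicitly; the call shape used throughout these hunks is simply:

    # Usage shape of the instrumentation in this diff (span names taken from the hunks above).
    with BusinessEvent("Ask Question", tenant_id=1, chat_session_id="abc123"):
        with current_event.create_span("Detail Question"):
            pass  # detail_question(...)
        with current_event.create_span("Generate Answer using RAG"):
            pass  # retrieval, prompting and LLM invocation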
@@ -233,47 +243,49 @@ def answer_using_llm(question, language, tenant, chat_session):
     # Langchain debugging if required
     # set_debug(True)

+    with current_event.create_span("Detail Question"):
         detailed_question = detail_question(question, language, model_variables, chat_session.session_id)
         current_app.logger.debug(f'Original question:\n {question}\n\nDetailed question: {detailed_question}')
         new_interaction.detailed_question = detailed_question
         new_interaction.detailed_question_at = dt.now(tz.utc)

+    with current_event.create_span("Detail Answer using LLM"):
         retriever = EveAIRetriever(model_variables, tenant_info)
         llm = model_variables['llm_no_rag']
         template = model_variables['encyclopedia_template']
         language_template = create_language_template(template, language)
         rag_prompt = ChatPromptTemplate.from_template(language_template)
         setup = RunnablePassthrough()
         output_parser = StrOutputParser()

         new_interaction_embeddings = []

         chain = setup | rag_prompt | llm | output_parser
         input_question = {"question": detailed_question}

         # Invoke the chain with the actual question
         answer = chain.invoke(input_question)
         new_interaction.answer = answer
         result = {
             'answer': answer,
             'citations': [],
             'insufficient_info': False
         }

     # Disable langchain debugging if set above.
     # set_debug(False)

     new_interaction.answer_at = dt.now(tz.utc)
     chat_session.session_end = dt.now(tz.utc)

     try:
         db.session.add(chat_session)
         db.session.add(new_interaction)
         db.session.commit()
         return result, new_interaction
     except SQLAlchemyError as e:
         current_app.logger.error(f'ask_question: Error saving interaction to database: {e}')
         raise


 def tasks_ping():

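
The chain expression setup | rag_prompt | llm | output_parser is LangChain's LCEL composition: each | feeds one runnable's output into the next. A self-contained toy version with a stand-in for the chat model shows the mechanics:

    # Toy LCEL chain mirroring the shape used above; RunnableLambda stands in for a real chat model.
    from langchain_core.messages import AIMessage
    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.runnables import RunnableLambda, RunnablePassthrough

    prompt = ChatPromptTemplate.from_template("Answer briefly: {question}")
    fake_llm = RunnableLambda(lambda _prompt_value: AIMessage(content="42"))  # ignores its input
    chain = RunnablePassthrough() | prompt | fake_llm | StrOutputParser()

    print(chain.invoke({"question": "What is the answer?"}))  # -> 42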
@@ -7,6 +7,7 @@ from common.extensions import minio_client
 import subprocess

 from .transcription_processor import TranscriptionProcessor
+from common.utils.business_event_context import current_event


 class AudioProcessor(TranscriptionProcessor):

@@ -24,8 +25,13 @@ class AudioProcessor(TranscriptionProcessor):
             self.document_version.id,
             self.document_version.file_name
         )
-        compressed_audio = self._compress_audio(file_data)
-        return self._transcribe_audio(compressed_audio)
+        with current_event.create_span("Audio Processing"):
+            compressed_audio = self._compress_audio(file_data)
+        with current_event.create_span("Transcription Generation"):
+            transcription = self._transcribe_audio(compressed_audio)
+
+        return transcription

     def _compress_audio(self, audio_data):
         self._log("Compressing audio")

@@ -5,6 +5,7 @@ from langchain_core.runnables import RunnablePassthrough
 from common.extensions import db, minio_client
 from common.utils.model_utils import create_language_template
 from .processor import Processor
+from common.utils.business_event_context import current_event


 class HTMLProcessor(Processor):

@@ -30,8 +31,10 @@ class HTMLProcessor(Processor):
         )
         html_content = file_data.decode('utf-8')

-        extracted_html, title = self._parse_html(html_content)
-        markdown = self._generate_markdown_from_html(extracted_html)
+        with current_event.create_span("HTML Content Extraction"):
+            extracted_html, title = self._parse_html(html_content)
+        with current_event.create_span("Markdown Generation"):
+            markdown = self._generate_markdown_from_html(extracted_html)

         self._save_markdown(markdown)
         self._log("Finished processing HTML")

@@ -10,6 +10,7 @@ from langchain_core.runnables import RunnablePassthrough
 from common.extensions import minio_client
 from common.utils.model_utils import create_language_template
 from .processor import Processor
+from common.utils.business_event_context import current_event


 class PDFProcessor(Processor):

@@ -32,13 +33,14 @@ class PDFProcessor(Processor):
                 self.document_version.file_name
             )

-            extracted_content = self._extract_content(file_data)
-            structured_content, title = self._structure_content(extracted_content)
-
-            llm_chunks = self._split_content_for_llm(structured_content)
-            markdown = self._process_chunks_with_llm(llm_chunks)
+            with current_event.create_span("PDF Extraction"):
+                extracted_content = self._extract_content(file_data)
+                structured_content, title = self._structure_content(extracted_content)
+
+            with current_event.create_span("Markdown Generation"):
+                llm_chunks = self._split_content_for_llm(structured_content)
+                markdown = self._process_chunks_with_llm(llm_chunks)
             self._save_markdown(markdown)
             self._log("Finished processing PDF")
             return markdown, title
         except Exception as e:

@@ -1,11 +1,13 @@
 # transcription_processor.py
-from common.utils.model_utils import create_language_template
-from .processor import Processor
 from langchain_text_splitters import RecursiveCharacterTextSplitter
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_core.runnables import RunnablePassthrough

+from common.utils.model_utils import create_language_template
+from .processor import Processor
+from common.utils.business_event_context import current_event


 class TranscriptionProcessor(Processor):
     def __init__(self, tenant, model_variables, document_version):

@@ -16,12 +18,14 @@ class TranscriptionProcessor(Processor):
     def process(self):
         self._log("Starting Transcription processing")
         try:
-            transcription = self._get_transcription()
-            chunks = self._chunk_transcription(transcription)
-            markdown_chunks = self._process_chunks(chunks)
-            full_markdown = self._combine_markdown_chunks(markdown_chunks)
+            with current_event.create_span("Transcription Generation"):
+                transcription = self._get_transcription()
+            with current_event.create_span("Markdown Generation"):
+                chunks = self._chunk_transcription(transcription)
+                markdown_chunks = self._process_chunks(chunks)
+                full_markdown = self._combine_markdown_chunks(markdown_chunks)
             self._save_markdown(full_markdown)
             self._log("Finished processing Transcription")
             return full_markdown, self._extract_title_from_markdown(full_markdown)
         except Exception as e:
             self._log(f"Error processing Transcription: {str(e)}", level='error')

@@ -24,6 +24,9 @@ from eveai_workers.Processors.html_processor import HTMLProcessor
 from eveai_workers.Processors.pdf_processor import PDFProcessor
 from eveai_workers.Processors.srt_processor import SRTProcessor

+from common.utils.business_event import BusinessEvent
+from common.utils.business_event_context import current_event
+

 # Healthcheck task
 @current_celery.task(name='ping', queue='embeddings')

@@ -33,76 +36,78 @@ def ping():


 @current_celery.task(name='create_embeddings', queue='embeddings')
 def create_embeddings(tenant_id, document_version_id):
-    current_app.logger.info(f'Creating embeddings for tenant {tenant_id} on document version {document_version_id}.')
+    # BusinessEvent creates a context, which is why we need to use it with a with block
+    with BusinessEvent('Create Embeddings', tenant_id, document_version_id=document_version_id):
+        current_app.logger.info(f'Creating embeddings for tenant {tenant_id} on document version {document_version_id}')

         try:
             # Retrieve Tenant for which we are processing
             tenant = Tenant.query.get(tenant_id)
             if tenant is None:
                 raise Exception(f'Tenant {tenant_id} not found')

             # Ensure we are working in the correct database schema
             Database(tenant_id).switch_schema()

             # Select variables to work with depending on tenant and model
             model_variables = select_model_variables(tenant)
             current_app.logger.debug(f'Model variables: {model_variables}')

             # Retrieve document version to process
             document_version = DocumentVersion.query.get(document_version_id)
             if document_version is None:
                 raise Exception(f'Document version {document_version_id} not found')

         except Exception as e:
             current_app.logger.error(f'Create Embeddings request received '
                                      f'for non existing document version {document_version_id} '
                                      f'for tenant {tenant_id}, '
                                      f'error: {e}')
             raise

         try:
             db.session.add(document_version)

             # start processing
             document_version.processing = True
             document_version.processing_started_at = dt.now(tz.utc)
             document_version.processing_finished_at = None
             document_version.processing_error = None

             db.session.commit()
         except SQLAlchemyError as e:
             current_app.logger.error(f'Unable to save Embedding status information '
                                      f'in document version {document_version_id} '
                                      f'for tenant {tenant_id}')
             raise

         delete_embeddings_for_document_version(document_version)

         try:
             match document_version.file_type:
                 case 'pdf':
                     process_pdf(tenant, model_variables, document_version)
                 case 'html':
                     process_html(tenant, model_variables, document_version)
                 case 'srt':
                     process_srt(tenant, model_variables, document_version)
                 case 'mp4' | 'mp3' | 'ogg':
                     process_audio(tenant, model_variables, document_version)
                 case _:
                     raise Exception(f'No functionality defined for file type {document_version.file_type} '
                                     f'for tenant {tenant_id} '
                                     f'while creating embeddings for document version {document_version_id}')
+            current_event.log("Finished Embedding Creation Task")

         except Exception as e:
             current_app.logger.error(f'Error creating embeddings for tenant {tenant_id} '
                                      f'on document version {document_version_id} '
                                      f'error: {e}')
             document_version.processing = False
             document_version.processing_finished_at = dt.now(tz.utc)
             document_version.processing_error = str(e)[:255]
             db.session.commit()
             create_embeddings.update_state(state=states.FAILURE)
             raise

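
Since the task is registered by name on the embeddings queue, the producer side (not shown in this diff) would enqueue it roughly as:

    # Illustrative enqueue call; the actual call site is elsewhere in the codebase.
    current_celery.send_task('create_embeddings',
                             args=[tenant.id, document_version.id],
                             queue='embeddings')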
@@ -118,35 +123,43 @@ def delete_embeddings_for_document_version(document_version):


 def process_pdf(tenant, model_variables, document_version):
-    processor = PDFProcessor(tenant, model_variables, document_version)
-    markdown, title = processor.process()
+    with current_event.create_span("PDF Processing"):
+        processor = PDFProcessor(tenant, model_variables, document_version)
+        markdown, title = processor.process()

     # Process markdown and embed
-    embed_markdown(tenant, model_variables, document_version, markdown, title)
+    with current_event.create_span("Embedding"):
+        embed_markdown(tenant, model_variables, document_version, markdown, title)


 def process_html(tenant, model_variables, document_version):
-    processor = HTMLProcessor(tenant, model_variables, document_version)
-    markdown, title = processor.process()
+    with current_event.create_span("HTML Processing"):
+        processor = HTMLProcessor(tenant, model_variables, document_version)
+        markdown, title = processor.process()

     # Process markdown and embed
-    embed_markdown(tenant, model_variables, document_version, markdown, title)
+    with current_event.create_span("Embedding"):
+        embed_markdown(tenant, model_variables, document_version, markdown, title)


 def process_audio(tenant, model_variables, document_version):
-    processor = AudioProcessor(tenant, model_variables, document_version)
-    markdown, title = processor.process()
+    with current_event.create_span("Audio Processing"):
+        processor = AudioProcessor(tenant, model_variables, document_version)
+        markdown, title = processor.process()

     # Process markdown and embed
-    embed_markdown(tenant, model_variables, document_version, markdown, title)
+    with current_event.create_span("Embedding"):
+        embed_markdown(tenant, model_variables, document_version, markdown, title)


 def process_srt(tenant, model_variables, document_version):
-    processor = SRTProcessor(tenant, model_variables, document_version)
-    markdown, title = processor.process()
+    with current_event.create_span("SRT Processing"):
+        processor = SRTProcessor(tenant, model_variables, document_version)
+        markdown, title = processor.process()

     # Process markdown and embed
-    embed_markdown(tenant, model_variables, document_version, markdown, title)
+    with current_event.create_span("Embedding"):
+        embed_markdown(tenant, model_variables, document_version, markdown, title)


 def embed_markdown(tenant, model_variables, document_version, markdown, title):

@@ -181,6 +194,7 @@ def embed_markdown(tenant, model_variables, document_version, markdown, title):


 def enrich_chunks(tenant, model_variables, document_version, title, chunks):
+    current_event.log("Starting Enriching Chunks Processing")
     current_app.logger.debug(f'Enriching chunks for tenant {tenant.id} '
                              f'on document version {document_version.id}')

@@ -213,11 +227,13 @@ def enrich_chunks(tenant, model_variables, document_version, title, chunks):

     current_app.logger.debug(f'Finished enriching chunks for tenant {tenant.id} '
                              f'on document version {document_version.id}')
+    current_event.log("Finished Enriching Chunks Processing")

     return enriched_chunks


 def summarize_chunk(tenant, model_variables, document_version, chunk):
+    current_event.log("Starting Summarizing Chunk")
     current_app.logger.debug(f'Summarizing chunk for tenant {tenant.id} '
                              f'on document version {document_version.id}')
     llm = model_variables['llm']

@@ -235,6 +251,7 @@ def summarize_chunk(tenant, model_variables, document_version, chunk):
         summary = chain.invoke({"text": chunk})
         current_app.logger.debug(f'Finished summarizing chunk for tenant {tenant.id} '
                                  f'on document version {document_version.id}.')
+        current_event.log("Finished Summarizing Chunk")
         return summary
     except LangChainException as e:
         current_app.logger.error(f'Error creating summary for chunk enrichment for tenant {tenant.id} '

@@ -244,6 +261,7 @@ def summarize_chunk(tenant, model_variables, document_version, chunk):


 def embed_chunks(tenant, model_variables, document_version, chunks):
+    current_event.log("Starting Embedding Chunks Processing")
     current_app.logger.debug(f'Embedding chunks for tenant {tenant.id} '
                              f'on document version {document_version.id}')
     embedding_model = model_variables['embedding_model']

@@ -268,6 +286,8 @@ def embed_chunks(tenant, model_variables, document_version, chunks):
         new_embedding.embedding = embedding
         new_embeddings.append(new_embedding)

+    current_app.logger.debug(f'Finished embedding chunks for tenant {tenant.id} ')
+
     return new_embeddings

@@ -281,244 +301,6 @@ def log_parsing_info(tenant, tags, included_elements, excluded_elements, exclude
|
|||||||
current_app.embed_tuning_logger.debug(f'First element to parse: {elements_to_parse[0]}')
|
current_app.embed_tuning_logger.debug(f'First element to parse: {elements_to_parse[0]}')
|
||||||
|
|
||||||
|
|
||||||
# def process_youtube(tenant, model_variables, document_version):
|
|
||||||
# download_file_name = f'{document_version.id}.mp4'
|
|
||||||
# compressed_file_name = f'{document_version.id}.mp3'
|
|
||||||
# transcription_file_name = f'{document_version.id}.txt'
|
|
||||||
# markdown_file_name = f'{document_version.id}.md'
|
|
||||||
#
|
|
||||||
# # Remove existing files (in case of a re-processing of the file
|
|
||||||
# minio_client.delete_document_file(tenant.id, document_version.doc_id, document_version.language,
|
|
||||||
# document_version.id, download_file_name)
|
|
||||||
# minio_client.delete_document_file(tenant.id, document_version.doc_id, document_version.language,
|
|
||||||
# document_version.id, compressed_file_name)
|
|
||||||
# minio_client.delete_document_file(tenant.id, document_version.doc_id, document_version.language,
|
|
||||||
# document_version.id, transcription_file_name)
|
|
||||||
# minio_client.delete_document_file(tenant.id, document_version.doc_id, document_version.language,
|
|
||||||
# document_version.id, markdown_file_name)
|
|
||||||
#
|
|
||||||
# of, title, description, author = download_youtube(document_version.url, tenant.id, document_version,
|
|
||||||
# download_file_name)
|
|
||||||
# document_version.system_context = f'Title: {title}\nDescription: {description}\nAuthor: {author}'
|
|
||||||
# compress_audio(tenant.id, document_version, download_file_name, compressed_file_name)
|
|
||||||
# transcribe_audio(tenant.id, document_version, compressed_file_name, transcription_file_name, model_variables)
|
|
||||||
# annotate_transcription(tenant, document_version, transcription_file_name, markdown_file_name, model_variables)
|
|
||||||
#
|
|
||||||
# potential_chunks = create_potential_chunks_for_markdown(tenant.id, document_version, markdown_file_name)
|
|
||||||
# actual_chunks = combine_chunks_for_markdown(potential_chunks, model_variables['min_chunk_size'],
|
|
||||||
# model_variables['max_chunk_size'])
|
|
||||||
#
|
|
||||||
# enriched_chunks = enrich_chunks(tenant, document_version, actual_chunks)
|
|
||||||
# embeddings = embed_chunks(tenant, model_variables, document_version, enriched_chunks)
|
|
||||||
#
|
|
||||||
# try:
|
|
||||||
# db.session.add(document_version)
|
|
||||||
# document_version.processing_finished_at = dt.now(tz.utc)
|
|
||||||
# document_version.processing = False
|
|
||||||
# db.session.add_all(embeddings)
|
|
||||||
# db.session.commit()
|
|
||||||
# except SQLAlchemyError as e:
|
|
||||||
# current_app.logger.error(f'Error saving embedding information for tenant {tenant.id} '
|
|
||||||
# f'on Youtube document version {document_version.id}'
|
|
||||||
# f'error: {e}')
|
|
||||||
# raise
|
|
||||||
#
|
|
||||||
# current_app.logger.info(f'Embeddings created successfully for tenant {tenant.id} '
|
|
||||||
# f'on Youtube document version {document_version.id} :-)')
|
|
||||||
#
|
|
||||||
#
|
|
||||||
# def download_youtube(url, tenant_id, document_version, file_name):
|
|
||||||
# try:
|
|
||||||
# current_app.logger.info(f'Downloading YouTube video: {url} for tenant: {tenant_id}')
|
|
||||||
# yt = YouTube(url)
|
|
||||||
# stream = yt.streams.get_audio_only()
|
|
||||||
#
|
|
||||||
# with tempfile.NamedTemporaryFile(delete=False) as temp_file:
|
|
||||||
# stream.download(output_path=temp_file.name)
|
|
||||||
# with open(temp_file.name, 'rb') as f:
|
|
||||||
# file_data = f.read()
|
|
||||||
#
|
|
||||||
# minio_client.upload_document_file(tenant_id, document_version.doc_id, document_version.language,
|
|
||||||
# document_version.id,
|
|
||||||
# file_name, file_data)
|
|
||||||
#
|
|
||||||
# current_app.logger.info(f'Downloaded YouTube video: {url} for tenant: {tenant_id}')
|
|
||||||
# return file_name, yt.title, yt.description, yt.author
|
|
||||||
# except Exception as e:
|
|
||||||
# current_app.logger.error(f'Error downloading YouTube video: {url} for tenant: {tenant_id} with error: {e}')
|
|
||||||
# raise
|
|
||||||
#
|
|
||||||
#
|
|
||||||
# def compress_audio(tenant_id, document_version, input_file, output_file):
|
|
||||||
# try:
|
|
||||||
# current_app.logger.info(f'Compressing audio for tenant: {tenant_id}')
|
|
||||||
#
|
|
||||||
# input_data = minio_client.download_document_file(tenant_id, document_version.doc_id, document_version.language,
|
|
||||||
# document_version.id, input_file)
|
|
||||||
#
|
|
||||||
# with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_input:
|
|
||||||
# temp_input.write(input_data)
|
|
||||||
# temp_input.flush()
|
|
||||||
#
|
|
||||||
# with tempfile.NamedTemporaryFile(delete=False, suffix='.mp3') as temp_output:
|
|
||||||
# result = subprocess.run(
|
|
||||||
# ['ffmpeg', '-i', temp_input.name, '-b:a', '64k', '-f', 'mp3', temp_output.name],
|
|
||||||
# capture_output=True,
|
|
||||||
# text=True
|
|
||||||
# )
|
|
||||||
#
|
|
||||||
# if result.returncode != 0:
|
|
||||||
# raise Exception(f"Compression failed: {result.stderr}")
|
|
||||||
#
|
|
||||||
# with open(temp_output.name, 'rb') as f:
|
|
||||||
# compressed_data = f.read()
|
|
||||||
#
|
|
||||||
# minio_client.upload_document_file(tenant_id, document_version.doc_id, document_version.language,
|
|
||||||
# document_version.id,
|
|
||||||
# output_file, compressed_data)
|
|
||||||
#
|
|
||||||
# current_app.logger.info(f'Compressed audio for tenant: {tenant_id}')
|
|
||||||
# except Exception as e:
|
|
||||||
# current_app.logger.error(f'Error compressing audio for tenant: {tenant_id} with error: {e}')
|
|
||||||
# raise
|
|
||||||
#
#
# def transcribe_audio(tenant_id, document_version, input_file, output_file, model_variables):
#     try:
#         current_app.logger.info(f'Transcribing audio for tenant: {tenant_id}')
#         client = model_variables['transcription_client']
#         model = model_variables['transcription_model']
#
#         # Download the audio file from MinIO
#         audio_data = minio_client.download_document_file(tenant_id, document_version.doc_id, document_version.language,
#                                                          document_version.id, input_file)
#
#         # Load the audio data into pydub
#         audio = AudioSegment.from_mp3(io.BytesIO(audio_data))
#
#         # Define segment length (e.g., 10 minutes)
#         segment_length = 10 * 60 * 1000  # 10 minutes in milliseconds
#
#         transcriptions = []
#
#         # Split audio into segments and transcribe each
#         for i, chunk in enumerate(audio[::segment_length]):
#             current_app.logger.debug(f'Transcribing chunk {i + 1} of {len(audio) // segment_length + 1}')
#
#             with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_audio:
#                 chunk.export(temp_audio.name, format="mp3")
#
#             with open(temp_audio.name, 'rb') as audio_segment:
#                 transcription = client.audio.transcriptions.create(
#                     file=audio_segment,
#                     model=model,
#                     language=document_version.language,
#                     response_format='verbose_json',
#                 )
#
#             transcriptions.append(transcription.text)
#
#             os.unlink(temp_audio.name)  # Delete the temporary file
#
#         # Combine all transcriptions
#         full_transcription = " ".join(transcriptions)
#
#         # Upload the full transcription to MinIO
#         minio_client.upload_document_file(
#             tenant_id,
#             document_version.doc_id,
#             document_version.language,
#             document_version.id,
#             output_file,
#             full_transcription.encode('utf-8')
#         )
#
#         current_app.logger.info(f'Transcribed audio for tenant: {tenant_id}')
#     except Exception as e:
#         current_app.logger.error(f'Error transcribing audio for tenant: {tenant_id}, with error: {e}')
#         raise
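The segmentation above leans on pydub's millisecond-based step slicing: audio[::segment_length] yields consecutive fixed-length AudioSegments. A minimal standalone sketch of just that mechanism (the 'sample.mp3' path is an assumption, not a file from this repo):

# Minimal sketch of the fixed-length segmentation used above. pydub addresses
# slices in milliseconds, and slicing with a step iterates over consecutive
# fixed-length chunks; each chunk is itself an AudioSegment.
from pydub import AudioSegment

segment_length = 10 * 60 * 1000  # 10 minutes in milliseconds

audio = AudioSegment.from_mp3('sample.mp3')  # assumed local test file
for i, chunk in enumerate(audio[::segment_length]):
    print(f'chunk {i}: {len(chunk) / 1000:.1f}s')  # len() is in milliseconds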
#
#
# def annotate_transcription(tenant, document_version, input_file, output_file, model_variables):
#     try:
#         current_app.logger.debug(f'Annotating transcription for tenant {tenant.id}')
#
#         char_splitter = CharacterTextSplitter(separator='.',
#                                               chunk_size=model_variables['annotation_chunk_length'],
#                                               chunk_overlap=0)
#
#         headers_to_split_on = [
#             ("#", "Header 1"),
#             ("##", "Header 2"),
#         ]
#         markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on, strip_headers=False)
#
#         llm = model_variables['llm']
#         template = model_variables['transcript_template']
#         language_template = create_language_template(template, document_version.language)
#         transcript_prompt = ChatPromptTemplate.from_template(language_template)
#         setup = RunnablePassthrough()
#         output_parser = StrOutputParser()
#
#         # Download the transcription file from MinIO
#         transcript_data = minio_client.download_document_file(tenant.id, document_version.doc_id,
#                                                               document_version.language, document_version.id,
#                                                               input_file)
#         transcript = transcript_data.decode('utf-8')
#
#         chain = setup | transcript_prompt | llm | output_parser
#
#         chunks = char_splitter.split_text(transcript)
#         all_markdown_chunks = []
#         last_markdown_chunk = ''
#         for chunk in chunks:
#             current_app.logger.debug(f'Annotating next chunk of {len(chunks)} for tenant {tenant.id}')
#             full_input = last_markdown_chunk + '\n' + chunk
#             if tenant.embed_tuning:
#                 current_app.embed_tuning_logger.debug(f'Annotating chunk: \n '
#                                                       f'------------------\n'
#                                                       f'{full_input}\n'
#                                                       f'------------------\n')
#             input_transcript = {'transcript': full_input}
#             markdown = chain.invoke(input_transcript)
#             # GPT-4o returns some kind of content description: ```markdown <text> ```
#             if markdown.startswith("```markdown"):
#                 markdown = "\n".join(markdown.strip().split("\n")[1:-1])
#             if tenant.embed_tuning:
#                 current_app.embed_tuning_logger.debug(f'Markdown Received: \n '
#                                                       f'------------------\n'
#                                                       f'{markdown}\n'
#                                                       f'------------------\n')
#             md_header_splits = markdown_splitter.split_text(markdown)
#             markdown_chunks = [doc.page_content for doc in md_header_splits]
#             # claude-3.5-sonnet returns introductory text
#             if not markdown_chunks[0].startswith('#'):
#                 markdown_chunks.pop(0)
#             last_markdown_chunk = markdown_chunks[-1]
#             markdown_chunks.pop()
#             all_markdown_chunks += markdown_chunks
#
#         all_markdown_chunks += [last_markdown_chunk]
#
#         annotated_transcript = '\n'.join(all_markdown_chunks)
#
#         # Upload the annotated transcript to MinIO
#         minio_client.upload_document_file(
#             tenant.id,
#             document_version.doc_id,
#             document_version.language,
#             document_version.id,
#             output_file,
#             annotated_transcript.encode('utf-8')
#         )
#
#         current_app.logger.info(f'Annotated transcription for tenant {tenant.id}')
#     except Exception as e:
#         current_app.logger.error(f'Error annotating transcription for tenant {tenant.id}, with error: {e}')
#         raise
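The loop above implements a simple stitching pattern: the last markdown section of each LLM response is carried into the next chunk's input, so sections cut mid-thought can be completed on the following pass. A generic sketch of that pattern, with a hypothetical 'annotate' callable standing in for the real prompt chain (not part of the codebase):

# Generic sketch of the carry-over stitching used in annotate_transcription.
# 'annotate' is a hypothetical stand-in: it takes text and returns a non-empty
# list of markdown sections.
def stitch_annotations(chunks, annotate):
    all_sections = []
    carry = ''
    for chunk in chunks:
        sections = annotate(carry + '\n' + chunk)
        carry = sections.pop()        # hold back the trailing, possibly incomplete section
        all_sections += sections
    all_sections.append(carry)        # flush the final carried section
    return '\n'.join(all_sections)

# Example with a trivial annotator that splits on blank lines:
print(stitch_annotations(['a\n\nb', 'c\n\nd'], lambda text: text.split('\n\n')))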
def create_potential_chunks_for_markdown(tenant_id, document_version, input_file):
    try:
        current_app.logger.info(f'Creating potential chunks for tenant {tenant_id}')
49 migrations/public/versions/25588210dab2_llm_metrics_added.py (new file)
@@ -0,0 +1,49 @@
"""LLM Metrics Added
|
||||||
|
|
||||||
|
Revision ID: 25588210dab2
|
||||||
|
Revises: 083ccd8206ea
|
||||||
|
Create Date: 2024-09-17 12:44:12.242990
|
||||||
|
|
||||||
|
"""
|
||||||
|
from alembic import op
|
||||||
|
import sqlalchemy as sa
|
||||||
|
from sqlalchemy.dialects import postgresql
|
||||||
|
|
||||||
|
# revision identifiers, used by Alembic.
|
||||||
|
revision = '25588210dab2'
|
||||||
|
down_revision = '083ccd8206ea'
|
||||||
|
branch_labels = None
|
||||||
|
depends_on = None
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade():
|
||||||
|
# ### commands auto generated by Alembic - please adjust! ###
|
||||||
|
op.create_table('llm_usage_metric',
|
||||||
|
sa.Column('id', sa.Integer(), nullable=False),
|
||||||
|
sa.Column('tenant_id', sa.Integer(), nullable=False),
|
||||||
|
sa.Column('environment', sa.String(length=20), nullable=False),
|
||||||
|
sa.Column('activity', sa.String(length=20), nullable=False),
|
||||||
|
sa.Column('sub_activity', sa.String(length=20), nullable=False),
|
||||||
|
sa.Column('activity_detail', sa.String(length=50), nullable=True),
|
||||||
|
sa.Column('session_id', sa.String(length=50), nullable=True),
|
||||||
|
sa.Column('interaction_id', sa.Integer(), nullable=True),
|
||||||
|
sa.Column('document_version_id', sa.Integer(), nullable=True),
|
||||||
|
sa.Column('prompt_tokens', sa.Integer(), nullable=True),
|
||||||
|
sa.Column('completion_tokens', sa.Integer(), nullable=True),
|
||||||
|
sa.Column('total_tokens', sa.Integer(), nullable=True),
|
||||||
|
sa.Column('cost', sa.Float(), nullable=True),
|
||||||
|
sa.Column('latency', sa.Float(), nullable=True),
|
||||||
|
sa.Column('model_name', sa.String(length=50), nullable=False),
|
||||||
|
sa.Column('timestamp', sa.DateTime(), nullable=False),
|
||||||
|
sa.Column('additional_info', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
|
||||||
|
sa.PrimaryKeyConstraint('id'),
|
||||||
|
schema='public'
|
||||||
|
)
|
||||||
|
|
||||||
|
# ### end Alembic commands ###
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade():
|
||||||
|
# ### commands auto generated by Alembic - please adjust! ###
|
||||||
|
op.drop_table('llm_usage_metric', schema='public')
|
||||||
|
# ### end Alembic commands ###
|
||||||
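For context, a hedged sketch of an ORM model mirroring the llm_usage_metric table this migration creates; the project's actual model class may differ, so the class and attribute names here are assumptions:

# Hedged sketch of an ORM model matching the llm_usage_metric columns above.
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class LLMUsageMetric(Base):
    __tablename__ = 'llm_usage_metric'
    __table_args__ = {'schema': 'public'}

    id = sa.Column(sa.Integer, primary_key=True)
    tenant_id = sa.Column(sa.Integer, nullable=False)
    environment = sa.Column(sa.String(20), nullable=False)
    activity = sa.Column(sa.String(20), nullable=False)
    sub_activity = sa.Column(sa.String(20), nullable=False)
    activity_detail = sa.Column(sa.String(50))
    session_id = sa.Column(sa.String(50))
    interaction_id = sa.Column(sa.Integer)
    document_version_id = sa.Column(sa.Integer)
    prompt_tokens = sa.Column(sa.Integer)
    completion_tokens = sa.Column(sa.Integer)
    total_tokens = sa.Column(sa.Integer)
    cost = sa.Column(sa.Float)
    latency = sa.Column(sa.Float)
    model_name = sa.Column(sa.String(50), nullable=False)
    timestamp = sa.Column(sa.DateTime, nullable=False)
    additional_info = sa.Column(postgresql.JSONB)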
@@ -0,0 +1,49 @@
"""Corrected BusinessEventLog

Revision ID: 2cbdb23ae02e
Revises: e3c6ff8c22df
Create Date: 2024-09-25 10:17:40.154566

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '2cbdb23ae02e'
down_revision = 'e3c6ff8c22df'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('business_event_log', schema=None) as batch_op:
        batch_op.alter_column('span_id',
                              existing_type=sa.VARCHAR(length=50),
                              nullable=True)
        batch_op.alter_column('span_name',
                              existing_type=sa.VARCHAR(length=50),
                              nullable=True)
        batch_op.alter_column('parent_span_id',
                              existing_type=sa.VARCHAR(length=50),
                              nullable=True)

    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('business_event_log', schema=None) as batch_op:
        batch_op.alter_column('parent_span_id',
                              existing_type=sa.VARCHAR(length=50),
                              nullable=False)
        batch_op.alter_column('span_name',
                              existing_type=sa.VARCHAR(length=50),
                              nullable=False)
        batch_op.alter_column('span_id',
                              existing_type=sa.VARCHAR(length=50),
                              nullable=False)

    # ### end Alembic commands ###
@@ -0,0 +1,38 @@
"""session_id is uuid instead of integer

Revision ID: 829094f07d44
Revises: 2cbdb23ae02e
Create Date: 2024-09-27 09:19:13.201988

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '829094f07d44'
down_revision = '2cbdb23ae02e'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('business_event_log', schema=None) as batch_op:
        batch_op.alter_column('chat_session_id',
                              existing_type=sa.INTEGER(),
                              type_=sa.String(length=50),
                              existing_nullable=True)

    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('business_event_log', schema=None) as batch_op:
        batch_op.alter_column('chat_session_id',
                              existing_type=sa.String(length=50),
                              type_=sa.INTEGER(),
                              existing_nullable=True)

    # ### end Alembic commands ###
@@ -0,0 +1,67 @@
"""Updated Monitoring Setup

Revision ID: e3c6ff8c22df
Revises: 25588210dab2
Create Date: 2024-09-25 10:05:57.684506

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = 'e3c6ff8c22df'
down_revision = '25588210dab2'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('business_event_log',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('timestamp', sa.DateTime(), nullable=False),
                    sa.Column('event_type', sa.String(length=50), nullable=False),
                    sa.Column('tenant_id', sa.Integer(), nullable=False),
                    sa.Column('trace_id', sa.String(length=50), nullable=False),
                    sa.Column('span_id', sa.String(length=50), nullable=False),
                    sa.Column('span_name', sa.String(length=50), nullable=False),
                    sa.Column('parent_span_id', sa.String(length=50), nullable=False),
                    sa.Column('document_version_id', sa.Integer(), nullable=True),
                    sa.Column('chat_session_id', sa.Integer(), nullable=True),
                    sa.Column('interaction_id', sa.Integer(), nullable=True),
                    sa.Column('environment', sa.String(length=20), nullable=True),
                    sa.Column('message', sa.Text(), nullable=True),
                    sa.PrimaryKeyConstraint('id'),
                    schema='public'
                    )
    op.drop_table('llm_usage_metric')
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('llm_usage_metric',
                    sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
                    sa.Column('tenant_id', sa.INTEGER(), autoincrement=False, nullable=False),
                    sa.Column('environment', sa.VARCHAR(length=20), autoincrement=False, nullable=False),
                    sa.Column('activity', sa.VARCHAR(length=20), autoincrement=False, nullable=False),
                    sa.Column('sub_activity', sa.VARCHAR(length=20), autoincrement=False, nullable=False),
                    sa.Column('activity_detail', sa.VARCHAR(length=50), autoincrement=False, nullable=True),
                    sa.Column('session_id', sa.VARCHAR(length=50), autoincrement=False, nullable=True),
                    sa.Column('interaction_id', sa.INTEGER(), autoincrement=False, nullable=True),
                    sa.Column('document_version_id', sa.INTEGER(), autoincrement=False, nullable=True),
                    sa.Column('prompt_tokens', sa.INTEGER(), autoincrement=False, nullable=True),
                    sa.Column('completion_tokens', sa.INTEGER(), autoincrement=False, nullable=True),
                    sa.Column('total_tokens', sa.INTEGER(), autoincrement=False, nullable=True),
                    sa.Column('cost', sa.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True),
                    sa.Column('latency', sa.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True),
                    sa.Column('model_name', sa.VARCHAR(length=50), autoincrement=False, nullable=False),
                    sa.Column('timestamp', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
                    sa.Column('additional_info', postgresql.JSONB(astext_type=sa.Text()), autoincrement=False, nullable=True),
                    sa.PrimaryKeyConstraint('id', name='llm_usage_metric_pkey')
                    )
    op.drop_table('business_event_log', schema='public')
    # ### end Alembic commands ###
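Taken together, the new migrations form the chain 25588210dab2 → e3c6ff8c22df → 2cbdb23ae02e → 829094f07d44, each down_revision pointing at its predecessor. A hedged sketch of applying the chain with Alembic's command API; the alembic.ini location is an assumption (a Flask-Migrate setup would typically just run 'flask db upgrade'):

# Hedged sketch: walk the revision chain above up to head using Alembic's
# command API. The script location matches the file paths shown above; the
# alembic.ini path is an assumption.
from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')  # assumed config file at the project root
cfg.set_main_option('script_location', 'migrations/public')
command.upgrade(cfg, 'head')  # applies all pending revisions in order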
@@ -159,13 +159,12 @@ http {
     }

     location /flower/ {
-        proxy_pass http://127.0.0.1:5555/;
+        proxy_pass http://flower:5555/flower/;
         proxy_set_header Host $host;
         proxy_set_header X-Real-IP $remote_addr;
         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
         proxy_set_header X-Forwarded-Proto $scheme;
     }

 }

 include sites-enabled/*;
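The new proxy_pass targets the flower service by hostname (Docker-style service DNS is an assumption here) and forwards the /flower/ prefix, matching the --url-prefix=/flower that scripts/start_flower.sh passes to Flower further below. A quick hedged smoke test, assuming nginx is reachable on localhost:

# Hedged smoke test for the /flower/ route, assuming nginx listens on
# localhost:80 in front of the Flower service.
import requests

resp = requests.get('http://localhost/flower/', timeout=5)
print(resp.status_code)  # expect 200 (or 401 when FLOWER_BASIC_AUTH is set)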
@@ -26,22 +26,22 @@ greenlet~=3.0.3
 gunicorn~=22.0.0
 Jinja2~=3.1.4
 kombu~=5.3.7
-langchain~=0.2.7
-langchain-anthropic~=0.1.19
-langchain-community~=0.2.7
-langchain-core~=0.2.16
-langchain-mistralai~=0.1.9
-langchain-openai~=0.1.15
-langchain-postgres~=0.0.9
-langchain-text-splitters~=0.2.2
+langchain~=0.3.0
+langchain-anthropic~=0.2.0
+langchain-community~=0.3.0
+langchain-core~=0.3.0
+langchain-mistralai~=0.2.0
+langchain-openai~=0.2.0
+langchain-postgres~=0.0.12
+langchain-text-splitters~=0.3.0
 langcodes~=3.4.0
 langdetect~=1.0.9
 langsmith~=0.1.81
-openai~=1.35.13
+openai~=1.45.1
 pg8000~=1.31.2
 pgvector~=0.2.5
 pycryptodome~=3.20.0
-pydantic~=2.7.4
+pydantic~=2.9.1
 PyJWT~=2.8.0
 PySocks~=1.7.1
 python-dateutil~=2.9.0.post0
@@ -53,7 +53,7 @@ pytz~=2024.1
 PyYAML~=6.0.2rc1
 redis~=5.0.4
 requests~=2.32.3
-SQLAlchemy~=2.0.31
+SQLAlchemy~=2.0.35
 tiktoken~=0.7.0
 tzdata~=2024.1
 urllib3~=2.2.2
@@ -63,7 +63,7 @@ zxcvbn~=4.4.28
 groq~=0.9.0
 pydub~=0.25.1
 argparse~=1.4.0
-portkey_ai~=1.8.2
+portkey_ai~=1.8.7
 minio~=7.2.7
 Werkzeug~=3.0.3
 itsdangerous~=2.2.0
@@ -76,4 +76,7 @@ PyPDF2~=3.0.1
 flask-restx~=1.3.0
 prometheus-flask-exporter~=0.23.1
 flask-healthz~=1.0.1
+langsmith~=0.1.121
+anthropic~=0.34.2
+prometheus-client~=0.20.0
+flower~=2.0.1
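All pins use the compatible-release operator, so for example SQLAlchemy~=2.0.35 accepts any 2.0.x from 2.0.35 upwards but rejects 2.1. A small illustration with the packaging library (its availability in this environment is an assumption; pip vendors it):

# Sketch of what a ~= (compatible release) pin from the list above permits.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet('~=2.0.35')  # as in SQLAlchemy~=2.0.35
print(spec.contains('2.0.40'))   # True  - patch upgrades allowed
print(spec.contains('2.1.0'))    # False - minor bump excluded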
@@ -8,7 +8,7 @@ export PYTHONPATH="$PROJECT_DIR/patched_packages:$PYTHONPATH:$PROJECT_DIR" # In
 chown -R appuser:appuser /app/logs

 # Start a worker for the 'embeddings' queue with higher concurrency
-celery -A eveai_workers.celery worker --loglevel=info -Q embeddings --autoscale=2,8 --hostname=embeddings_worker@%h &
+celery -A eveai_workers.celery worker --loglevel=debug -Q embeddings --autoscale=2,8 --hostname=embeddings_worker@%h &

 # Start a worker for the 'llm_interactions' queue with auto-scaling - not necessary, in eveai_chat_workers
 # celery -A eveai_workers.celery worker --loglevel=info - Q llm_interactions --autoscale=2,8 --hostname=interactions_worker@%h &
33 scripts/start_flower.sh (Executable file → Normal file)
@@ -1,9 +1,28 @@
-#!/usr/bin/env bash
-
-cd "/Volumes/OWC4M2_1/Dropbox/Josako's Dev/Josako/EveAI/Development/eveAI/" || exit 1
-source "/Volumes/OWC4M2_1/Dropbox/Josako's Dev/Josako/EveAI/Development/eveAI/.venv/bin/activate"
-
-# on development machine, no authentication required
-export FLOWER_UNAUTHENTICATED_API=True
-# Start a worker for the 'embeddings' queue with higher concurrency
-celery -A eveai_workers.celery flower
+#!/bin/bash
+set -e
+
+# scripts/start_flower.sh
+
+# Set default values
+REDIS_HOST=${REDIS_URL:-redis}
+REDIS_PORT=${REDIS_PORT:-6379}
+
+# Set environment-specific variables
+if [ "$FLASK_ENV" = "production" ]; then
+    # Production settings
+    export FLOWER_BASIC_AUTH="${FLOWER_USER}:${FLOWER_PASSWORD}"
+    export FLOWER_BROKER_URL="redis://${REDIS_USER}:${REDIS_PASS}@${REDIS_URL}:${REDIS_PORT}/0"
+    export CELERY_BROKER_URL="redis://${REDIS_USER}:${REDIS_PASS}@${REDIS_URL}:${REDIS_PORT}/0"
+else
+    # Development settings
+    export FLOWER_BROKER_URL="redis://${REDIS_HOST}:${REDIS_PORT}/0"
+    export CELERY_BROKER_URL="redis://${REDIS_HOST}:${REDIS_PORT}/0"
+fi
+
+echo $BROKER_URL
+echo "----------"
+
+# Start Flower
+exec celery flower \
+    --url-prefix=/flower \
+    --port=5555