Compare commits
17 Commits
v1.0.6-alf
...
v1.0.11-al
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9f5f090f0c | ||
|
|
5ffad160b1 | ||
|
|
d6a7743f26 | ||
|
|
9782e31ae5 | ||
|
|
f638860e90 | ||
|
|
b700cfac64 | ||
|
|
883175b8f5 | ||
|
|
ae697df4c9 | ||
|
|
d9cb00fcdc | ||
|
|
ee1b0f1cfa | ||
|
|
a740c96630 | ||
|
|
67bdeac434 | ||
|
|
1622591afd | ||
|
|
6cf660e622 | ||
|
|
9e14824249 | ||
|
|
76cb825660 | ||
|
|
341ba47d1c |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -41,3 +41,5 @@ migrations/.DS_Store
|
||||
migrations/public/.DS_Store
|
||||
scripts/.DS_Store
|
||||
scripts/__pycache__/run_eveai_app.cpython-312.pyc
|
||||
/eveai_repo.txt
|
||||
*repo.txt
|
||||
|
||||
6
.idea/sqldialects.xml
generated
6
.idea/sqldialects.xml
generated
@@ -1,6 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="SqlDialectMappings">
|
||||
<file url="PROJECT" dialect="PostgreSQL" />
|
||||
</component>
|
||||
</project>
|
||||
20
.repopackignore_base
Normal file
20
.repopackignore_base
Normal file
@@ -0,0 +1,20 @@
|
||||
# Add patterns to ignore here, one per line
|
||||
# Example:
|
||||
# *.log
|
||||
# tmp/
|
||||
logs/
|
||||
nginx/static/assets/fonts/
|
||||
nginx/static/assets/img/
|
||||
nginx/static/assets/js/
|
||||
nginx/static/scss/
|
||||
patched_packages/
|
||||
migrations/
|
||||
*material*
|
||||
*nucleo*
|
||||
*package*
|
||||
nginx/mime.types
|
||||
*.gitignore*
|
||||
.python-version
|
||||
.repopackignore*
|
||||
repopack.config.json
|
||||
*repo.txt
|
||||
12
.repopackignore_components
Normal file
12
.repopackignore_components
Normal file
@@ -0,0 +1,12 @@
|
||||
docker/
|
||||
eveai_api/
|
||||
eveai_app/
|
||||
eveai_beat/
|
||||
eveai_chat/
|
||||
eveai_chat_workers/
|
||||
eveai_entitlements/
|
||||
eveai_workers/
|
||||
instance/
|
||||
integrations/
|
||||
nginx/
|
||||
scripts/
|
||||
12
.repopackignore_docker
Normal file
12
.repopackignore_docker
Normal file
@@ -0,0 +1,12 @@
|
||||
common/
|
||||
config/
|
||||
eveai_api/
|
||||
eveai_app/
|
||||
eveai_beat/
|
||||
eveai_chat/
|
||||
eveai_chat_workers/
|
||||
eveai_entitlements/
|
||||
eveai_workers/
|
||||
instance/
|
||||
integrations/
|
||||
nginx/
|
||||
11
.repopackignore_eveai_api
Normal file
11
.repopackignore_eveai_api
Normal file
@@ -0,0 +1,11 @@
|
||||
docker/
|
||||
eveai_app/
|
||||
eveai_beat/
|
||||
eveai_chat/
|
||||
eveai_chat_workers/
|
||||
eveai_entitlements/
|
||||
eveai_workers/
|
||||
instance/
|
||||
integrations/
|
||||
nginx/
|
||||
scripts/
|
||||
11
.repopackignore_eveai_app
Normal file
11
.repopackignore_eveai_app
Normal file
@@ -0,0 +1,11 @@
|
||||
docker/
|
||||
eveai_api/
|
||||
eveai_beat/
|
||||
eveai_chat/
|
||||
eveai_chat_workers/
|
||||
eveai_entitlements/
|
||||
eveai_workers/
|
||||
instance/
|
||||
integrations/
|
||||
nginx/
|
||||
scripts/
|
||||
11
.repopackignore_eveai_beat
Normal file
11
.repopackignore_eveai_beat
Normal file
@@ -0,0 +1,11 @@
|
||||
docker/
|
||||
eveai_api/
|
||||
eveai_app/
|
||||
eveai_chat/
|
||||
eveai_chat_workers/
|
||||
eveai_entitlements/
|
||||
eveai_workers/
|
||||
instance/
|
||||
integrations/
|
||||
nginx/
|
||||
scripts/
|
||||
11
.repopackignore_eveai_chat
Normal file
11
.repopackignore_eveai_chat
Normal file
@@ -0,0 +1,11 @@
|
||||
docker/
|
||||
eveai_api/
|
||||
eveai_app/
|
||||
eveai_beat/
|
||||
eveai_chat_workers/
|
||||
eveai_entitlements/
|
||||
eveai_workers/
|
||||
instance/
|
||||
integrations/
|
||||
nginx/
|
||||
scripts/
|
||||
11
.repopackignore_eveai_chat_workers
Normal file
11
.repopackignore_eveai_chat_workers
Normal file
@@ -0,0 +1,11 @@
|
||||
docker/
|
||||
eveai_api/
|
||||
eveai_app/
|
||||
eveai_beat/
|
||||
eveai_chat/
|
||||
eveai_entitlements/
|
||||
eveai_workers/
|
||||
instance/
|
||||
integrations/
|
||||
nginx/
|
||||
scripts/
|
||||
11
.repopackignore_eveai_entitlements
Normal file
11
.repopackignore_eveai_entitlements
Normal file
@@ -0,0 +1,11 @@
|
||||
docker/
|
||||
eveai_api/
|
||||
eveai_app/
|
||||
eveai_beat/
|
||||
eveai_chat/
|
||||
eveai_chat_workers/
|
||||
eveai_workers/
|
||||
instance/
|
||||
integrations/
|
||||
nginx/
|
||||
scripts/
|
||||
11
.repopackignore_eveai_workers
Normal file
11
.repopackignore_eveai_workers
Normal file
@@ -0,0 +1,11 @@
|
||||
docker/
|
||||
eveai_api/
|
||||
eveai_app/
|
||||
eveai_beat/
|
||||
eveai_chat/
|
||||
eveai_chat_workers/
|
||||
eveai_entitlements/
|
||||
instance/
|
||||
integrations/
|
||||
nginx/
|
||||
scripts/
|
||||
4
.repopackignore_full
Normal file
4
.repopackignore_full
Normal file
@@ -0,0 +1,4 @@
|
||||
docker
|
||||
integrations
|
||||
nginx
|
||||
scripts
|
||||
13
.repopackignore_integrations
Normal file
13
.repopackignore_integrations
Normal file
@@ -0,0 +1,13 @@
|
||||
common/
|
||||
config/
|
||||
docker/
|
||||
eveai_api/
|
||||
eveai_app/
|
||||
eveai_beat/
|
||||
eveai_chat/
|
||||
eveai_chat_workers/
|
||||
eveai_entitlements/
|
||||
eveai_workers/
|
||||
instance/
|
||||
nginx/
|
||||
scripts/
|
||||
11
.repopackignore_nginx
Normal file
11
.repopackignore_nginx
Normal file
@@ -0,0 +1,11 @@
|
||||
docker/
|
||||
eveai_api/
|
||||
eveai_app/
|
||||
eveai_beat/
|
||||
eveai_chat/
|
||||
eveai_chat_workers/
|
||||
eveai_entitlements/
|
||||
eveai_workers/
|
||||
instance/
|
||||
integrations/
|
||||
scripts/
|
||||
79
CHANGELOG.md
79
CHANGELOG.md
@@ -20,11 +20,88 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
||||
- For now removed features.
|
||||
|
||||
### Fixed
|
||||
- For any bug fixes.
|
||||
- Set default language when registering Documents or URLs.
|
||||
|
||||
### Security
|
||||
- In case of vulnerabilities.
|
||||
|
||||
## [1.0.11-alfa]
|
||||
|
||||
### Added
|
||||
- License Usage Calculation realised
|
||||
- View License Usages
|
||||
- Celery Beat container added
|
||||
- First schedule in Celery Beat for calculating usage (hourly)
|
||||
|
||||
### Changed
|
||||
- repopack can now split for different components
|
||||
|
||||
### Fixed
|
||||
- Various fixes as consequece of changing file_location / file_name ==> bucket_name / object_name
|
||||
- Celery Routing / Queuing updated
|
||||
|
||||
## [1.0.10-alfa]
|
||||
|
||||
### Added
|
||||
- BusinessEventLog monitoring using Langchain native code
|
||||
|
||||
### Changed
|
||||
- Allow longer audio files (or video) to be uploaded and processed
|
||||
- Storage and Embedding usage now expressed in MiB iso tokens (more logical)
|
||||
- Views for License / LicenseTier
|
||||
|
||||
### Removed
|
||||
- Portkey removed for monitoring usage
|
||||
|
||||
## [1.0.9-alfa] - 2024/10/01
|
||||
|
||||
### Added
|
||||
- Business Event tracing (eveai_workers & eveai_chat_workers)
|
||||
- Flower Container added for monitoring
|
||||
|
||||
### Changed
|
||||
- Healthcheck improvements
|
||||
- model_utils turned into a class with lazy loading
|
||||
|
||||
### Deprecated
|
||||
- For soon-to-be removed features.
|
||||
|
||||
### Removed
|
||||
- For now removed features.
|
||||
|
||||
### Fixed
|
||||
- Set default language when registering Documents or URLs.
|
||||
|
||||
## [1.0.8-alfa] - 2024-09-12
|
||||
|
||||
### Added
|
||||
- Tenant type defined to allow for active, inactive, demo ... tenants
|
||||
- Search and filtering functionality on Tenants
|
||||
- Implementation of health checks (1st version)
|
||||
- Provision for Prometheus monitoring (no implementation yet)
|
||||
- Refine audio_processor and srt_processor to reduce duplicate code and support larger files
|
||||
- Introduction of repopack to reason in LLMs about the code
|
||||
|
||||
### Fixed
|
||||
- Refine audio_processor and srt_processor to reduce duplicate code and support larger files
|
||||
|
||||
## [1.0.7-alfa] - 2024-09-12
|
||||
|
||||
### Added
|
||||
- Full Document API allowing for creation, updating and invalidation of documents.
|
||||
- Metadata fields (JSON) added to DocumentVersion, allowing end-users to add structured information
|
||||
- Wordpress plugin eveai_sync to synchronize Wordpress content with EveAI
|
||||
|
||||
### Fixed
|
||||
- Maximal deduplication of code between views and api in document_utils.py
|
||||
|
||||
## [1.0.6-alfa] - 2024-09-03
|
||||
|
||||
### Fixed
|
||||
- Problems with tenant scheme migrations - may have to be revisited
|
||||
- Correction of default language settings when uploading docs or URLs
|
||||
- Addition of a CHANGELOG.md file
|
||||
|
||||
## [1.0.5-alfa] - 2024-09-02
|
||||
|
||||
### Added
|
||||
|
||||
@@ -9,9 +9,9 @@ from flask_socketio import SocketIO
|
||||
from flask_jwt_extended import JWTManager
|
||||
from flask_session import Session
|
||||
from flask_wtf import CSRFProtect
|
||||
from flask_restful import Api
|
||||
from flask_restx import Api
|
||||
from prometheus_flask_exporter import PrometheusMetrics
|
||||
|
||||
from .utils.nginx_utils import prefixed_url_for
|
||||
from .utils.simple_encryption import SimpleEncryption
|
||||
from .utils.minio_utils import MinioClient
|
||||
|
||||
@@ -28,9 +28,7 @@ cors = CORS()
|
||||
socketio = SocketIO()
|
||||
jwt = JWTManager()
|
||||
session = Session()
|
||||
api = Api()
|
||||
|
||||
# kms_client = JosKMSClient.from_service_account_json('config/gc_sa_eveai.json')
|
||||
|
||||
api_rest = Api()
|
||||
simple_encryption = SimpleEncryption()
|
||||
minio_client = MinioClient()
|
||||
metrics = PrometheusMetrics.for_app_factory()
|
||||
|
||||
@@ -1,23 +1,31 @@
|
||||
from langchain_core.retrievers import BaseRetriever
|
||||
from sqlalchemy import asc
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
from pydantic import BaseModel, Field
|
||||
from pydantic import Field, BaseModel, PrivateAttr
|
||||
from typing import Any, Dict
|
||||
from flask import current_app
|
||||
|
||||
from common.extensions import db
|
||||
from common.models.interaction import ChatSession, Interaction
|
||||
from common.utils.datetime_utils import get_date_in_timezone
|
||||
from common.utils.model_utils import ModelVariables
|
||||
|
||||
|
||||
class EveAIHistoryRetriever(BaseRetriever):
|
||||
model_variables: Dict[str, Any] = Field(...)
|
||||
session_id: str = Field(...)
|
||||
class EveAIHistoryRetriever(BaseRetriever, BaseModel):
|
||||
_model_variables: ModelVariables = PrivateAttr()
|
||||
_session_id: str = PrivateAttr()
|
||||
|
||||
def __init__(self, model_variables: Dict[str, Any], session_id: str):
|
||||
def __init__(self, model_variables: ModelVariables, session_id: str):
|
||||
super().__init__()
|
||||
self.model_variables = model_variables
|
||||
self.session_id = session_id
|
||||
self._model_variables = model_variables
|
||||
self._session_id = session_id
|
||||
|
||||
@property
|
||||
def model_variables(self) -> ModelVariables:
|
||||
return self._model_variables
|
||||
|
||||
@property
|
||||
def session_id(self) -> str:
|
||||
return self._session_id
|
||||
|
||||
def _get_relevant_documents(self, query: str):
|
||||
current_app.logger.debug(f'Retrieving history of interactions for query: {query}')
|
||||
@@ -1,30 +1,39 @@
|
||||
from langchain_core.retrievers import BaseRetriever
|
||||
from sqlalchemy import func, and_, or_, desc
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
from pydantic import BaseModel, Field
|
||||
from pydantic import BaseModel, Field, PrivateAttr
|
||||
from typing import Any, Dict
|
||||
from flask import current_app
|
||||
|
||||
from common.extensions import db
|
||||
from common.models.document import Document, DocumentVersion
|
||||
from common.utils.datetime_utils import get_date_in_timezone
|
||||
from common.utils.model_utils import ModelVariables
|
||||
|
||||
|
||||
class EveAIRetriever(BaseRetriever):
|
||||
model_variables: Dict[str, Any] = Field(...)
|
||||
tenant_info: Dict[str, Any] = Field(...)
|
||||
class EveAIRetriever(BaseRetriever, BaseModel):
|
||||
_model_variables: ModelVariables = PrivateAttr()
|
||||
_tenant_info: Dict[str, Any] = PrivateAttr()
|
||||
|
||||
def __init__(self, model_variables: Dict[str, Any], tenant_info: Dict[str, Any]):
|
||||
def __init__(self, model_variables: ModelVariables, tenant_info: Dict[str, Any]):
|
||||
super().__init__()
|
||||
self.model_variables = model_variables
|
||||
self.tenant_info = tenant_info
|
||||
current_app.logger.debug(f'Model variables type: {type(model_variables)}')
|
||||
self._model_variables = model_variables
|
||||
self._tenant_info = tenant_info
|
||||
|
||||
@property
|
||||
def model_variables(self) -> ModelVariables:
|
||||
return self._model_variables
|
||||
|
||||
@property
|
||||
def tenant_info(self) -> Dict[str, Any]:
|
||||
return self._tenant_info
|
||||
|
||||
def _get_relevant_documents(self, query: str):
|
||||
|
||||
|
||||
|
||||
current_app.logger.debug(f'Retrieving relevant documents for query: {query}')
|
||||
query_embedding = self._get_query_embedding(query)
|
||||
current_app.logger.debug(f'Model Variables Private: {type(self._model_variables)}')
|
||||
current_app.logger.debug(f'Model Variables Property: {type(self.model_variables)}')
|
||||
db_class = self.model_variables['embedding_db_model']
|
||||
similarity_threshold = self.model_variables['similarity_threshold']
|
||||
k = self.model_variables['k']
|
||||
49
common/langchain/llm_metrics_handler.py
Normal file
49
common/langchain/llm_metrics_handler.py
Normal file
@@ -0,0 +1,49 @@
|
||||
import time
|
||||
from langchain.callbacks.base import BaseCallbackHandler
|
||||
from typing import Dict, Any, List
|
||||
from langchain.schema import LLMResult
|
||||
from common.utils.business_event_context import current_event
|
||||
from flask import current_app
|
||||
|
||||
|
||||
class LLMMetricsHandler(BaseCallbackHandler):
|
||||
def __init__(self):
|
||||
self.total_tokens: int = 0
|
||||
self.prompt_tokens: int = 0
|
||||
self.completion_tokens: int = 0
|
||||
self.start_time: float = 0
|
||||
self.end_time: float = 0
|
||||
self.total_time: float = 0
|
||||
|
||||
def reset(self):
|
||||
self.total_tokens = 0
|
||||
self.prompt_tokens = 0
|
||||
self.completion_tokens = 0
|
||||
self.start_time = 0
|
||||
self.end_time = 0
|
||||
self.total_time = 0
|
||||
|
||||
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
|
||||
self.start_time = time.time()
|
||||
|
||||
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
|
||||
self.end_time = time.time()
|
||||
self.total_time = self.end_time - self.start_time
|
||||
|
||||
usage = response.llm_output.get('token_usage', {})
|
||||
self.prompt_tokens += usage.get('prompt_tokens', 0)
|
||||
self.completion_tokens += usage.get('completion_tokens', 0)
|
||||
self.total_tokens = self.prompt_tokens + self.completion_tokens
|
||||
|
||||
metrics = self.get_metrics()
|
||||
current_event.log_llm_metrics(metrics)
|
||||
self.reset() # Reset for the next call
|
||||
|
||||
def get_metrics(self) -> Dict[str, int | float]:
|
||||
return {
|
||||
'total_tokens': self.total_tokens,
|
||||
'prompt_tokens': self.prompt_tokens,
|
||||
'completion_tokens': self.completion_tokens,
|
||||
'time_elapsed': self.total_time,
|
||||
'interaction_type': 'LLM',
|
||||
}
|
||||
51
common/langchain/tracked_openai_embeddings.py
Normal file
51
common/langchain/tracked_openai_embeddings.py
Normal file
@@ -0,0 +1,51 @@
|
||||
from langchain_openai import OpenAIEmbeddings
|
||||
from typing import List, Any
|
||||
import time
|
||||
from common.utils.business_event_context import current_event
|
||||
|
||||
|
||||
class TrackedOpenAIEmbeddings(OpenAIEmbeddings):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
def embed_documents(self, texts: list[str]) -> list[list[float]]:
|
||||
start_time = time.time()
|
||||
result = super().embed_documents(texts)
|
||||
end_time = time.time()
|
||||
|
||||
# Estimate token usage (OpenAI uses tiktoken for this)
|
||||
import tiktoken
|
||||
enc = tiktoken.encoding_for_model(self.model)
|
||||
total_tokens = sum(len(enc.encode(text)) for text in texts)
|
||||
|
||||
metrics = {
|
||||
'total_tokens': total_tokens,
|
||||
'prompt_tokens': total_tokens, # For embeddings, all tokens are prompt tokens
|
||||
'completion_tokens': 0,
|
||||
'time_elapsed': end_time - start_time,
|
||||
'interaction_type': 'Embedding',
|
||||
}
|
||||
current_event.log_llm_metrics(metrics)
|
||||
|
||||
return result
|
||||
|
||||
def embed_query(self, text: str) -> List[float]:
|
||||
start_time = time.time()
|
||||
result = super().embed_query(text)
|
||||
end_time = time.time()
|
||||
|
||||
# Estimate token usage
|
||||
import tiktoken
|
||||
enc = tiktoken.encoding_for_model(self.model)
|
||||
total_tokens = len(enc.encode(text))
|
||||
|
||||
metrics = {
|
||||
'total_tokens': total_tokens,
|
||||
'prompt_tokens': total_tokens,
|
||||
'completion_tokens': 0,
|
||||
'time_elapsed': end_time - start_time,
|
||||
'interaction_type': 'Embedding',
|
||||
}
|
||||
current_event.log_llm_metrics(metrics)
|
||||
|
||||
return result
|
||||
27
common/langchain/tracked_transcribe.py
Normal file
27
common/langchain/tracked_transcribe.py
Normal file
@@ -0,0 +1,27 @@
|
||||
import time
|
||||
from common.utils.business_event_context import current_event
|
||||
|
||||
|
||||
def tracked_transcribe(client, *args, **kwargs):
|
||||
start_time = time.time()
|
||||
|
||||
# Extract the file and model from kwargs if present, otherwise use defaults
|
||||
file = kwargs.get('file')
|
||||
model = kwargs.get('model', 'whisper-1')
|
||||
duration = kwargs.pop('duration', 600)
|
||||
|
||||
result = client.audio.transcriptions.create(*args, **kwargs)
|
||||
end_time = time.time()
|
||||
|
||||
# Token usage for transcriptions is actually the duration in seconds we pass, as the whisper model is priced per second transcribed
|
||||
|
||||
metrics = {
|
||||
'total_tokens': duration,
|
||||
'prompt_tokens': 0, # For transcriptions, all tokens are considered "completion"
|
||||
'completion_tokens': duration,
|
||||
'time_elapsed': end_time - start_time,
|
||||
'interaction_type': 'ASR',
|
||||
}
|
||||
current_event.log_llm_metrics(metrics)
|
||||
|
||||
return result
|
||||
2
common/models/README.txt
Normal file
2
common/models/README.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
If models are added to the public schema (i.e. in the user domain), ensure to add their corresponding tables to the
|
||||
env.py, get_public_table_names, for tenant migrations!
|
||||
@@ -1,6 +1,7 @@
|
||||
from common.extensions import db
|
||||
from .user import User, Tenant
|
||||
from pgvector.sqlalchemy import Vector
|
||||
from sqlalchemy.dialects.postgresql import JSONB
|
||||
|
||||
|
||||
class Document(db.Model):
|
||||
@@ -27,12 +28,15 @@ class DocumentVersion(db.Model):
|
||||
id = db.Column(db.Integer, primary_key=True)
|
||||
doc_id = db.Column(db.Integer, db.ForeignKey(Document.id), nullable=False)
|
||||
url = db.Column(db.String(200), nullable=True)
|
||||
file_location = db.Column(db.String(255), nullable=True)
|
||||
file_name = db.Column(db.String(200), nullable=True)
|
||||
bucket_name = db.Column(db.String(255), nullable=True)
|
||||
object_name = db.Column(db.String(200), nullable=True)
|
||||
file_type = db.Column(db.String(20), nullable=True)
|
||||
file_size = db.Column(db.Float, nullable=True)
|
||||
language = db.Column(db.String(2), nullable=False)
|
||||
user_context = db.Column(db.Text, nullable=True)
|
||||
system_context = db.Column(db.Text, nullable=True)
|
||||
user_metadata = db.Column(JSONB, nullable=True)
|
||||
system_metadata = db.Column(JSONB, nullable=True)
|
||||
|
||||
# Versioning Information
|
||||
created_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now())
|
||||
@@ -52,12 +56,6 @@ class DocumentVersion(db.Model):
|
||||
def __repr__(self):
|
||||
return f"<DocumentVersion {self.document_language.document_id}.{self.document_language.language}>.{self.id}>"
|
||||
|
||||
def calc_file_location(self):
|
||||
return f"{self.document.tenant_id}/{self.document.id}/{self.language}"
|
||||
|
||||
def calc_file_name(self):
|
||||
return f"{self.id}.{self.file_type}"
|
||||
|
||||
|
||||
class Embedding(db.Model):
|
||||
__tablename__ = 'embeddings'
|
||||
|
||||
110
common/models/entitlements.py
Normal file
110
common/models/entitlements.py
Normal file
@@ -0,0 +1,110 @@
|
||||
from common.extensions import db
|
||||
|
||||
|
||||
class BusinessEventLog(db.Model):
|
||||
__bind_key__ = 'public'
|
||||
__table_args__ = {'schema': 'public'}
|
||||
|
||||
id = db.Column(db.Integer, primary_key=True)
|
||||
timestamp = db.Column(db.DateTime, nullable=False)
|
||||
event_type = db.Column(db.String(50), nullable=False)
|
||||
tenant_id = db.Column(db.Integer, nullable=False)
|
||||
trace_id = db.Column(db.String(50), nullable=False)
|
||||
span_id = db.Column(db.String(50))
|
||||
span_name = db.Column(db.String(50))
|
||||
parent_span_id = db.Column(db.String(50))
|
||||
document_version_id = db.Column(db.Integer)
|
||||
document_version_file_size = db.Column(db.Float)
|
||||
chat_session_id = db.Column(db.String(50))
|
||||
interaction_id = db.Column(db.Integer)
|
||||
environment = db.Column(db.String(20))
|
||||
llm_metrics_total_tokens = db.Column(db.Integer)
|
||||
llm_metrics_prompt_tokens = db.Column(db.Integer)
|
||||
llm_metrics_completion_tokens = db.Column(db.Integer)
|
||||
llm_metrics_total_time = db.Column(db.Float)
|
||||
llm_metrics_call_count = db.Column(db.Integer)
|
||||
llm_interaction_type = db.Column(db.String(20))
|
||||
message = db.Column(db.Text)
|
||||
license_usage_id = db.Column(db.Integer, db.ForeignKey('public.license_usage.id'), nullable=True)
|
||||
license_usage = db.relationship('LicenseUsage', backref='events')
|
||||
|
||||
|
||||
class License(db.Model):
|
||||
__bind_key__ = 'public'
|
||||
__table_args__ = {'schema': 'public'}
|
||||
|
||||
id = db.Column(db.Integer, primary_key=True)
|
||||
tenant_id = db.Column(db.Integer, db.ForeignKey('public.tenant.id'), nullable=False)
|
||||
tier_id = db.Column(db.Integer, db.ForeignKey('public.license_tier.id'),nullable=False) # 'small', 'medium', 'custom'
|
||||
start_date = db.Column(db.Date, nullable=False)
|
||||
end_date = db.Column(db.Date, nullable=True)
|
||||
currency = db.Column(db.String(20), nullable=False)
|
||||
yearly_payment = db.Column(db.Boolean, nullable=False, default=False)
|
||||
basic_fee = db.Column(db.Float, nullable=False)
|
||||
max_storage_mb = db.Column(db.Integer, nullable=False)
|
||||
additional_storage_price = db.Column(db.Float, nullable=False)
|
||||
additional_storage_bucket = db.Column(db.Integer, nullable=False)
|
||||
included_embedding_mb = db.Column(db.Integer, nullable=False)
|
||||
additional_embedding_price = db.Column(db.Numeric(10, 4), nullable=False)
|
||||
additional_embedding_bucket = db.Column(db.Integer, nullable=False)
|
||||
included_interaction_tokens = db.Column(db.Integer, nullable=False)
|
||||
additional_interaction_token_price = db.Column(db.Numeric(10, 4), nullable=False)
|
||||
additional_interaction_bucket = db.Column(db.Integer, nullable=False)
|
||||
overage_embedding = db.Column(db.Float, nullable=False, default=0)
|
||||
overage_interaction = db.Column(db.Float, nullable=False, default=0)
|
||||
|
||||
tenant = db.relationship('Tenant', back_populates='licenses')
|
||||
license_tier = db.relationship('LicenseTier', back_populates='licenses')
|
||||
usages = db.relationship('LicenseUsage', order_by='LicenseUsage.period_start_date', back_populates='license')
|
||||
|
||||
|
||||
class LicenseTier(db.Model):
|
||||
__bind_key__ = 'public'
|
||||
__table_args__ = {'schema': 'public'}
|
||||
|
||||
id = db.Column(db.Integer, primary_key=True)
|
||||
name = db.Column(db.String(50), nullable=False)
|
||||
version = db.Column(db.String(50), nullable=False)
|
||||
start_date = db.Column(db.Date, nullable=False)
|
||||
end_date = db.Column(db.Date, nullable=True)
|
||||
basic_fee_d = db.Column(db.Float, nullable=True)
|
||||
basic_fee_e = db.Column(db.Float, nullable=True)
|
||||
max_storage_mb = db.Column(db.Integer, nullable=False)
|
||||
additional_storage_price_d = db.Column(db.Numeric(10, 4), nullable=False)
|
||||
additional_storage_price_e = db.Column(db.Numeric(10, 4), nullable=False)
|
||||
additional_storage_bucket = db.Column(db.Integer, nullable=False)
|
||||
included_embedding_mb = db.Column(db.Integer, nullable=False)
|
||||
additional_embedding_price_d = db.Column(db.Numeric(10, 4), nullable=False)
|
||||
additional_embedding_price_e = db.Column(db.Numeric(10, 4), nullable=False)
|
||||
additional_embedding_bucket = db.Column(db.Integer, nullable=False)
|
||||
included_interaction_tokens = db.Column(db.Integer, nullable=False)
|
||||
additional_interaction_token_price_d = db.Column(db.Numeric(10, 4), nullable=False)
|
||||
additional_interaction_token_price_e = db.Column(db.Numeric(10, 4), nullable=False)
|
||||
additional_interaction_bucket = db.Column(db.Integer, nullable=False)
|
||||
standard_overage_embedding = db.Column(db.Float, nullable=False, default=0)
|
||||
standard_overage_interaction = db.Column(db.Float, nullable=False, default=0)
|
||||
|
||||
licenses = db.relationship('License', back_populates='license_tier')
|
||||
|
||||
|
||||
class LicenseUsage(db.Model):
|
||||
__bind_key__ = 'public'
|
||||
__table_args__ = {'schema': 'public'}
|
||||
|
||||
id = db.Column(db.Integer, primary_key=True)
|
||||
license_id = db.Column(db.Integer, db.ForeignKey('public.license.id'), nullable=False)
|
||||
tenant_id = db.Column(db.Integer, db.ForeignKey('public.tenant.id'), nullable=False)
|
||||
storage_mb_used = db.Column(db.Float, default=0)
|
||||
embedding_mb_used = db.Column(db.Float, default=0)
|
||||
embedding_prompt_tokens_used = db.Column(db.Integer, default=0)
|
||||
embedding_completion_tokens_used = db.Column(db.Integer, default=0)
|
||||
embedding_total_tokens_used = db.Column(db.Integer, default=0)
|
||||
interaction_prompt_tokens_used = db.Column(db.Integer, default=0)
|
||||
interaction_completion_tokens_used = db.Column(db.Integer, default=0)
|
||||
interaction_total_tokens_used = db.Column(db.Integer, default=0)
|
||||
period_start_date = db.Column(db.Date, nullable=False)
|
||||
period_end_date = db.Column(db.Date, nullable=False)
|
||||
|
||||
license = db.relationship('License', back_populates='usages')
|
||||
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
from datetime import date
|
||||
|
||||
from common.extensions import db
|
||||
from flask_security import UserMixin, RoleMixin
|
||||
from sqlalchemy.dialects.postgresql import ARRAY
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy import CheckConstraint
|
||||
|
||||
from common.models.entitlements import License
|
||||
|
||||
|
||||
class Tenant(db.Model):
|
||||
@@ -21,6 +24,7 @@ class Tenant(db.Model):
|
||||
website = db.Column(db.String(255), nullable=True)
|
||||
timezone = db.Column(db.String(50), nullable=True, default='UTC')
|
||||
rag_context = db.Column(db.Text, nullable=True)
|
||||
type = db.Column(db.String(20), nullable=True, server_default='Active')
|
||||
|
||||
# language information
|
||||
default_language = db.Column(db.String(2), nullable=True)
|
||||
@@ -50,20 +54,32 @@ class Tenant(db.Model):
|
||||
fallback_algorithms = db.Column(ARRAY(sa.String(50)), nullable=True)
|
||||
|
||||
# Licensing Information
|
||||
license_start_date = db.Column(db.Date, nullable=True)
|
||||
license_end_date = db.Column(db.Date, nullable=True)
|
||||
allowed_monthly_interactions = db.Column(db.Integer, nullable=True)
|
||||
encrypted_chat_api_key = db.Column(db.String(500), nullable=True)
|
||||
encrypted_api_key = db.Column(db.String(500), nullable=True)
|
||||
|
||||
|
||||
# Tuning enablers
|
||||
embed_tuning = db.Column(db.Boolean, nullable=True, default=False)
|
||||
rag_tuning = db.Column(db.Boolean, nullable=True, default=False)
|
||||
|
||||
# Entitlements
|
||||
currency = db.Column(db.String(20), nullable=True)
|
||||
usage_email = db.Column(db.String(255), nullable=True)
|
||||
storage_dirty = db.Column(db.Boolean, nullable=True, default=False)
|
||||
|
||||
# Relations
|
||||
users = db.relationship('User', backref='tenant')
|
||||
domains = db.relationship('TenantDomain', backref='tenant')
|
||||
licenses = db.relationship('License', back_populates='tenant')
|
||||
license_usages = db.relationship('LicenseUsage', backref='tenant')
|
||||
|
||||
@property
|
||||
def current_license(self):
|
||||
today = date.today()
|
||||
return License.query.filter(
|
||||
License.tenant_id == self.id,
|
||||
License.start_date <= today,
|
||||
(License.end_date.is_(None) | (License.end_date >= today))
|
||||
).order_by(License.start_date.desc()).first()
|
||||
|
||||
def __repr__(self):
|
||||
return f"<Tenant {self.id}: {self.name}>"
|
||||
@@ -75,6 +91,7 @@ class Tenant(db.Model):
|
||||
'website': self.website,
|
||||
'timezone': self.timezone,
|
||||
'rag_context': self.rag_context,
|
||||
'type': self.type,
|
||||
'default_language': self.default_language,
|
||||
'allowed_languages': self.allowed_languages,
|
||||
'embedding_model': self.embedding_model,
|
||||
@@ -91,11 +108,10 @@ class Tenant(db.Model):
|
||||
'chat_RAG_temperature': self.chat_RAG_temperature,
|
||||
'chat_no_RAG_temperature': self.chat_no_RAG_temperature,
|
||||
'fallback_algorithms': self.fallback_algorithms,
|
||||
'license_start_date': self.license_start_date,
|
||||
'license_end_date': self.license_end_date,
|
||||
'allowed_monthly_interactions': self.allowed_monthly_interactions,
|
||||
'embed_tuning': self.embed_tuning,
|
||||
'rag_tuning': self.rag_tuning,
|
||||
'currency': self.currency,
|
||||
'usage_email': self.usage_email,
|
||||
}
|
||||
|
||||
|
||||
|
||||
246
common/utils/business_event.py
Normal file
246
common/utils/business_event.py
Normal file
@@ -0,0 +1,246 @@
|
||||
import os
|
||||
import uuid
|
||||
from contextlib import contextmanager
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, Optional
|
||||
from datetime import datetime as dt, timezone as tz
|
||||
from portkey_ai import Portkey, Config
|
||||
import logging
|
||||
|
||||
from .business_event_context import BusinessEventContext
|
||||
from common.models.entitlements import BusinessEventLog
|
||||
from common.extensions import db
|
||||
|
||||
|
||||
class BusinessEvent:
    """A business-level trace covering one logical operation for a tenant.

    The class is itself a context manager, with explicit __enter__/__exit__
    (not @contextmanager) because entering/leaving must also interact with
    BusinessEventContext and its underlying local stack. Within a trace,
    create_span() opens nested spans; LLM usage metrics are accumulated per
    span/trace and every record is written both to the 'business_events'
    logger (Graylog) and to the BusinessEventLog database table.
    """

    def __init__(self, event_type: str, tenant_id: int, **kwargs):
        """Start a new trace with a fresh trace_id.

        :param event_type: business event name for this trace.
        :param tenant_id: id of the tenant the event belongs to.
        :param kwargs: optional correlation ids — document_version_id,
            document_version_file_size, chat_session_id, interaction_id.
        """
        self.event_type = event_type
        self.tenant_id = tenant_id
        self.trace_id = str(uuid.uuid4())
        # Span bookkeeping; no span is active until create_span() is entered.
        self.span_id = None
        self.span_name = None
        self.parent_span_id = None
        self.document_version_id = kwargs.get('document_version_id')
        self.document_version_file_size = kwargs.get('document_version_file_size')
        self.chat_session_id = kwargs.get('chat_session_id')
        self.interaction_id = kwargs.get('interaction_id')
        self.environment = os.environ.get("FLASK_ENV", "development")
        self.span_counter = 0
        # Stack of saved (span_id, span_name, parent_span_id) tuples for nesting.
        self.spans = []
        self.llm_metrics = self._empty_llm_metrics()

    @staticmethod
    def _empty_llm_metrics() -> Dict[str, Any]:
        """Return a zeroed accumulator for LLM usage metrics."""
        return {
            'total_tokens': 0,
            'prompt_tokens': 0,
            'completion_tokens': 0,
            'total_time': 0,
            'call_count': 0,
            'interaction_type': None,
        }

    def update_attribute(self, attribute: str, value: Any):
        """Set an existing attribute by name.

        :raises AttributeError: if the attribute does not exist on the event.
        """
        if hasattr(self, attribute):
            setattr(self, attribute, value)
        else:
            raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{attribute}'")

    def update_llm_metrics(self, metrics: dict):
        """Fold one LLM call's metrics into the running totals.

        ``metrics`` must carry 'total_tokens', 'prompt_tokens',
        'completion_tokens', 'time_elapsed' and 'interaction_type'
        (the last call's interaction_type wins).
        """
        self.llm_metrics['total_tokens'] += metrics['total_tokens']
        self.llm_metrics['prompt_tokens'] += metrics['prompt_tokens']
        self.llm_metrics['completion_tokens'] += metrics['completion_tokens']
        self.llm_metrics['total_time'] += metrics['time_elapsed']
        self.llm_metrics['call_count'] += 1
        self.llm_metrics['interaction_type'] = metrics['interaction_type']

    def reset_llm_metrics(self):
        """Zero the accumulated LLM metrics (mutates the dict in place so
        existing references to ``self.llm_metrics`` stay valid)."""
        self.llm_metrics.update(self._empty_llm_metrics())

    @contextmanager
    def create_span(self, span_name: str):
        """Open a nested span for the duration of a ``with`` block.

        Implemented with @contextmanager because entry (assign span ids)
        and exit (flush metrics, restore the previous span) cleanly split
        around the yield. Pending LLM metrics are flushed and reset when the
        span ends.
        """
        parent_span_id = self.span_id
        self.span_counter += 1
        new_span_id = str(uuid.uuid4())

        # Save the current span info so it can be restored on exit.
        self.spans.append((self.span_id, self.span_name, self.parent_span_id))

        # Install the new span info.
        self.span_id = new_span_id
        self.span_name = span_name
        self.parent_span_id = parent_span_id

        self.log(f"Starting span {span_name}")

        try:
            yield
        finally:
            if self.llm_metrics['call_count'] > 0:
                self.log_final_metrics()
                self.reset_llm_metrics()
            self.log(f"Ending span {span_name}")
            # Restore the previous span info (or clear if this was the outermost).
            if self.spans:
                self.span_id, self.span_name, self.parent_span_id = self.spans.pop()
            else:
                self.span_id = None
                self.span_name = None
                self.parent_span_id = None

    def _context_log_data(self) -> Dict[str, Any]:
        """Correlation fields shared by every log record of this trace."""
        return {
            'event_type': self.event_type,
            'tenant_id': self.tenant_id,
            'trace_id': self.trace_id,
            'span_id': self.span_id,
            'span_name': self.span_name,
            'parent_span_id': self.parent_span_id,
            'document_version_id': self.document_version_id,
            'document_version_file_size': self.document_version_file_size,
            'chat_session_id': self.chat_session_id,
            'interaction_id': self.interaction_id,
            'environment': self.environment,
        }

    def _write_log(self, message: str, level: str, extra: Dict[str, Any]):
        """Emit one record to the 'business_events' logger (Graylog) and
        persist the same fields as a BusinessEventLog row (committed
        immediately, matching the original per-record commit behaviour)."""
        log_data = {**self._context_log_data(), **extra}
        logger = logging.getLogger('business_events')
        # log to Graylog
        getattr(logger, level)(message, extra=log_data)
        # Log to database
        event_log = BusinessEventLog(
            timestamp=dt.now(tz=tz.utc),
            message=message,
            **log_data,
        )
        db.session.add(event_log)
        db.session.commit()

    def log(self, message: str, level: str = 'info'):
        """Log a plain trace message with the trace's correlation fields."""
        self._write_log(message, level, {})

    def log_llm_metrics(self, metrics: dict, level: str = 'info'):
        """Accumulate one LLM call's metrics, then log that single call."""
        self.update_llm_metrics(metrics)
        self._write_log("LLM Metrics", level, {
            'llm_metrics_total_tokens': metrics['total_tokens'],
            'llm_metrics_prompt_tokens': metrics['prompt_tokens'],
            'llm_metrics_completion_tokens': metrics['completion_tokens'],
            'llm_metrics_total_time': metrics['time_elapsed'],
            'llm_interaction_type': metrics['interaction_type'],
        })

    def log_final_metrics(self, level: str = 'info'):
        """Log the totals accumulated since the last reset (includes call_count)."""
        self._write_log("Final LLM Metrics", level, {
            'llm_metrics_total_tokens': self.llm_metrics['total_tokens'],
            'llm_metrics_prompt_tokens': self.llm_metrics['prompt_tokens'],
            'llm_metrics_completion_tokens': self.llm_metrics['completion_tokens'],
            'llm_metrics_total_time': self.llm_metrics['total_time'],
            'llm_metrics_call_count': self.llm_metrics['call_count'],
            'llm_interaction_type': self.llm_metrics['interaction_type'],
        })

    def __enter__(self):
        """Open the trace and push it onto the business-event stack."""
        self.log(f'Starting Trace for {self.event_type}')
        return BusinessEventContext(self).__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Flush any pending LLM metrics, close the trace and pop the stack."""
        if self.llm_metrics['call_count'] > 0:
            self.log_final_metrics()
            self.reset_llm_metrics()
        self.log(f'Ending Trace for {self.event_type}')
        return BusinessEventContext(self).__exit__(exc_type, exc_val, exc_tb)
|
||||
25
common/utils/business_event_context.py
Normal file
25
common/utils/business_event_context.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from werkzeug.local import LocalProxy, LocalStack
|
||||
|
||||
_business_event_stack = LocalStack()
|
||||
|
||||
|
||||
def _get_current_event():
    """Return the BusinessEvent at the top of the local stack.

    Raises RuntimeError when called outside any business-event context.
    """
    event = _business_event_stack.top
    if event is None:
        raise RuntimeError("No business event context found. Are you sure you're in a business event?")
    return event
|
||||
|
||||
|
||||
current_event = LocalProxy(_get_current_event)
|
||||
|
||||
|
||||
class BusinessEventContext:
    """Context manager that makes a BusinessEvent the 'current' event for the
    duration of a ``with`` block by pushing it onto the local stack consumed
    by the ``current_event`` proxy."""

    def __init__(self, event):
        # The BusinessEvent instance this context will expose.
        self.event = event

    def __enter__(self):
        """Push the event onto the stack and return it."""
        _business_event_stack.push(self.event)
        return self.event

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Pop the event; returns None, so exceptions propagate to the caller."""
        _business_event_stack.pop()
|
||||
@@ -1,14 +1,16 @@
|
||||
from celery import Celery
|
||||
from kombu import Queue
|
||||
from werkzeug.local import LocalProxy
|
||||
from redbeat import RedBeatScheduler
|
||||
|
||||
celery_app = Celery()
|
||||
|
||||
|
||||
def init_celery(celery, app):
|
||||
def init_celery(celery, app, is_beat=False):
|
||||
celery_app.main = app.name
|
||||
app.logger.debug(f'CELERY_BROKER_URL: {app.config["CELERY_BROKER_URL"]}')
|
||||
app.logger.debug(f'CELERY_RESULT_BACKEND: {app.config["CELERY_RESULT_BACKEND"]}')
|
||||
|
||||
celery_config = {
|
||||
'broker_url': app.config.get('CELERY_BROKER_URL', 'redis://localhost:6379/0'),
|
||||
'result_backend': app.config.get('CELERY_RESULT_BACKEND', 'redis://localhost:6379/0'),
|
||||
@@ -17,19 +19,40 @@ def init_celery(celery, app):
|
||||
'accept_content': app.config.get('CELERY_ACCEPT_CONTENT', ['json']),
|
||||
'timezone': app.config.get('CELERY_TIMEZONE', 'UTC'),
|
||||
'enable_utc': app.config.get('CELERY_ENABLE_UTC', True),
|
||||
'task_routes': {'eveai_worker.tasks.create_embeddings': {'queue': 'embeddings',
|
||||
'routing_key': 'embeddings.create_embeddings'}},
|
||||
}
|
||||
|
||||
if is_beat:
|
||||
# Add configurations specific to Beat scheduler
|
||||
celery_config['beat_scheduler'] = 'redbeat.RedBeatScheduler'
|
||||
celery_config['redbeat_lock_key'] = 'redbeat::lock'
|
||||
celery_config['beat_max_loop_interval'] = 10 # Adjust as needed
|
||||
|
||||
celery_app.conf.update(**celery_config)
|
||||
|
||||
# Setting up Celery task queues
|
||||
celery_app.conf.task_queues = (
|
||||
Queue('default', routing_key='task.#'),
|
||||
Queue('embeddings', routing_key='embeddings.#', queue_arguments={'x-max-priority': 10}),
|
||||
Queue('llm_interactions', routing_key='llm_interactions.#', queue_arguments={'x-max-priority': 5}),
|
||||
)
|
||||
# Task queues for workers only
|
||||
if not is_beat:
|
||||
celery_app.conf.task_queues = (
|
||||
Queue('default', routing_key='task.#'),
|
||||
Queue('embeddings', routing_key='embeddings.#', queue_arguments={'x-max-priority': 10}),
|
||||
Queue('llm_interactions', routing_key='llm_interactions.#', queue_arguments={'x-max-priority': 5}),
|
||||
Queue('entitlements', routing_key='entitlements.#', queue_arguments={'x-max-priority': 10}),
|
||||
)
|
||||
celery_app.conf.task_routes = {
|
||||
'eveai_workers.*': { # All tasks from eveai_workers module
|
||||
'queue': 'embeddings',
|
||||
'routing_key': 'embeddings.#',
|
||||
},
|
||||
'eveai_chat_workers.*': { # All tasks from eveai_chat_workers module
|
||||
'queue': 'llm_interactions',
|
||||
'routing_key': 'llm_interactions.#',
|
||||
},
|
||||
'eveai_entitlements.*': { # All tasks from eveai_entitlements module
|
||||
'queue': 'entitlements',
|
||||
'routing_key': 'entitlements.#',
|
||||
}
|
||||
}
|
||||
|
||||
# Ensuring tasks execute with Flask application context
|
||||
# Ensure tasks execute with Flask context
|
||||
class ContextTask(celery.Task):
|
||||
def __call__(self, *args, **kwargs):
|
||||
with app.app_context():
|
||||
@@ -37,6 +60,39 @@ def init_celery(celery, app):
|
||||
|
||||
celery.Task = ContextTask
|
||||
|
||||
# Original init_celery before updating for beat
|
||||
# def init_celery(celery, app):
|
||||
# celery_app.main = app.name
|
||||
# app.logger.debug(f'CELERY_BROKER_URL: {app.config["CELERY_BROKER_URL"]}')
|
||||
# app.logger.debug(f'CELERY_RESULT_BACKEND: {app.config["CELERY_RESULT_BACKEND"]}')
|
||||
# celery_config = {
|
||||
# 'broker_url': app.config.get('CELERY_BROKER_URL', 'redis://localhost:6379/0'),
|
||||
# 'result_backend': app.config.get('CELERY_RESULT_BACKEND', 'redis://localhost:6379/0'),
|
||||
# 'task_serializer': app.config.get('CELERY_TASK_SERIALIZER', 'json'),
|
||||
# 'result_serializer': app.config.get('CELERY_RESULT_SERIALIZER', 'json'),
|
||||
# 'accept_content': app.config.get('CELERY_ACCEPT_CONTENT', ['json']),
|
||||
# 'timezone': app.config.get('CELERY_TIMEZONE', 'UTC'),
|
||||
# 'enable_utc': app.config.get('CELERY_ENABLE_UTC', True),
|
||||
# 'task_routes': {'eveai_worker.tasks.create_embeddings': {'queue': 'embeddings',
|
||||
# 'routing_key': 'embeddings.create_embeddings'}},
|
||||
# }
|
||||
# celery_app.conf.update(**celery_config)
|
||||
#
|
||||
# # Setting up Celery task queues
|
||||
# celery_app.conf.task_queues = (
|
||||
# Queue('default', routing_key='task.#'),
|
||||
# Queue('embeddings', routing_key='embeddings.#', queue_arguments={'x-max-priority': 10}),
|
||||
# Queue('llm_interactions', routing_key='llm_interactions.#', queue_arguments={'x-max-priority': 5}),
|
||||
# )
|
||||
#
|
||||
# # Ensuring tasks execute with Flask application context
|
||||
# class ContextTask(celery.Task):
|
||||
# def __call__(self, *args, **kwargs):
|
||||
# with app.app_context():
|
||||
# return self.run(*args, **kwargs)
|
||||
#
|
||||
# celery.Task = ContextTask
|
||||
|
||||
|
||||
def make_celery(app_name, config):
|
||||
return celery_app
|
||||
|
||||
@@ -23,6 +23,14 @@ def cors_after_request(response, prefix):
|
||||
current_app.logger.debug(f'request.args: {request.args}')
|
||||
current_app.logger.debug(f'request is json?: {request.is_json}')
|
||||
|
||||
# Exclude health checks from checks
|
||||
if request.path.startswith('/healthz') or request.path.startswith('/_healthz'):
|
||||
current_app.logger.debug('Skipping CORS headers for health checks')
|
||||
response.headers.add('Access-Control-Allow-Origin', '*')
|
||||
response.headers.add('Access-Control-Allow-Headers', '*')
|
||||
response.headers.add('Access-Control-Allow-Methods', '*')
|
||||
return response
|
||||
|
||||
tenant_id = None
|
||||
allowed_origins = []
|
||||
|
||||
|
||||
@@ -1,15 +1,18 @@
|
||||
from datetime import datetime as dt, timezone as tz
|
||||
|
||||
from sqlalchemy import desc
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
from werkzeug.utils import secure_filename
|
||||
from common.models.document import Document, DocumentVersion
|
||||
from common.extensions import db, minio_client
|
||||
from common.utils.celery_utils import current_celery
|
||||
from flask import current_app
|
||||
from flask_security import current_user
|
||||
import requests
|
||||
from urllib.parse import urlparse, unquote
|
||||
import os
|
||||
from .eveai_exceptions import EveAIInvalidLanguageException, EveAIDoubleURLException, EveAIUnsupportedFileType, \
|
||||
EveAIYoutubeError
|
||||
from .eveai_exceptions import EveAIInvalidLanguageException, EveAIDoubleURLException, EveAIUnsupportedFileType
|
||||
from ..models.user import Tenant
|
||||
|
||||
|
||||
def create_document_stack(api_input, file, filename, extension, tenant_id):
|
||||
@@ -21,7 +24,8 @@ def create_document_stack(api_input, file, filename, extension, tenant_id):
|
||||
new_doc_vers = create_version_for_document(new_doc,
|
||||
api_input.get('url', ''),
|
||||
api_input.get('language', 'en'),
|
||||
api_input.get('user_context', '')
|
||||
api_input.get('user_context', ''),
|
||||
api_input.get('user_metadata'),
|
||||
)
|
||||
db.session.add(new_doc_vers)
|
||||
|
||||
@@ -58,7 +62,7 @@ def create_document(form, filename, tenant_id):
|
||||
return new_doc
|
||||
|
||||
|
||||
def create_version_for_document(document, url, language, user_context):
|
||||
def create_version_for_document(document, url, language, user_context, user_metadata):
|
||||
new_doc_vers = DocumentVersion()
|
||||
if url != '':
|
||||
new_doc_vers.url = url
|
||||
@@ -71,30 +75,37 @@ def create_version_for_document(document, url, language, user_context):
|
||||
if user_context != '':
|
||||
new_doc_vers.user_context = user_context
|
||||
|
||||
if user_metadata != '' and user_metadata is not None:
|
||||
new_doc_vers.user_metadata = user_metadata
|
||||
|
||||
new_doc_vers.document = document
|
||||
|
||||
set_logging_information(new_doc_vers, dt.now(tz.utc))
|
||||
|
||||
mark_tenant_storage_dirty(document.tenant_id)
|
||||
|
||||
return new_doc_vers
|
||||
|
||||
|
||||
def upload_file_for_version(doc_vers, file, extension, tenant_id):
|
||||
doc_vers.file_type = extension
|
||||
doc_vers.file_name = doc_vers.calc_file_name()
|
||||
doc_vers.file_location = doc_vers.calc_file_location()
|
||||
|
||||
# Normally, the tenant bucket should exist. But let's be on the safe side if a migration took place.
|
||||
minio_client.create_tenant_bucket(tenant_id)
|
||||
|
||||
try:
|
||||
minio_client.upload_document_file(
|
||||
bn, on, size = minio_client.upload_document_file(
|
||||
tenant_id,
|
||||
doc_vers.doc_id,
|
||||
doc_vers.language,
|
||||
doc_vers.id,
|
||||
doc_vers.file_name,
|
||||
f"{doc_vers.id}.{extension}",
|
||||
file
|
||||
)
|
||||
doc_vers.bucket_name = bn
|
||||
doc_vers.object_name = on
|
||||
doc_vers.file_size = size / 1048576 # Convert bytes to MB
|
||||
|
||||
db.session.commit()
|
||||
current_app.logger.info(f'Successfully saved document to MinIO for tenant {tenant_id} for '
|
||||
f'document version {doc_vers.id} while uploading file.')
|
||||
@@ -109,10 +120,30 @@ def set_logging_information(obj, timestamp):
|
||||
obj.created_at = timestamp
|
||||
obj.updated_at = timestamp
|
||||
|
||||
user_id = get_current_user_id()
|
||||
if user_id:
|
||||
obj.created_by = user_id
|
||||
obj.updated_by = user_id
|
||||
|
||||
|
||||
def update_logging_information(obj, timestamp):
    """Stamp modification-audit fields on *obj*: always refresh ``updated_at``,
    and set ``updated_by`` when an authenticated user can be resolved."""
    obj.updated_at = timestamp
    editor_id = get_current_user_id()
    if editor_id:
        obj.updated_by = editor_id
|
||||
|
||||
|
||||
def get_current_user_id():
    """Best-effort lookup of the authenticated user's id.

    Returns None when nobody is authenticated, or when the ``current_user``
    proxy is unavailable entirely (e.g. in an API/worker context).
    """
    try:
        user = current_user
        return user.id if user and user.is_authenticated else None
    except Exception:
        # current_user may not exist outside a request context.
        return None
|
||||
|
||||
|
||||
def get_extension_from_content_type(content_type):
|
||||
content_type_map = {
|
||||
@@ -190,32 +221,10 @@ def process_multiple_urls(urls, tenant_id, api_input):
|
||||
return results
|
||||
|
||||
|
||||
def prepare_youtube_document(url, tenant_id, api_input):
|
||||
try:
|
||||
filename = f"placeholder.youtube"
|
||||
extension = 'youtube'
|
||||
|
||||
new_doc = create_document(api_input, filename, tenant_id)
|
||||
new_doc_vers = create_version_for_document(new_doc, url, api_input['language'], api_input['user_context'])
|
||||
|
||||
new_doc_vers.file_type = extension
|
||||
new_doc_vers.file_name = new_doc_vers.calc_file_name()
|
||||
new_doc_vers.file_location = new_doc_vers.calc_file_location()
|
||||
|
||||
db.session.add(new_doc)
|
||||
db.session.add(new_doc_vers)
|
||||
db.session.commit()
|
||||
|
||||
return new_doc, new_doc_vers
|
||||
except Exception as e:
|
||||
raise EveAIYoutubeError(f"Error preparing YouTube document: {str(e)}")
|
||||
|
||||
|
||||
def start_embedding_task(tenant_id, doc_vers_id):
|
||||
task = current_celery.send_task('create_embeddings', queue='embeddings', args=[
|
||||
tenant_id,
|
||||
doc_vers_id,
|
||||
])
|
||||
task = current_celery.send_task('create_embeddings',
|
||||
args=[tenant_id, doc_vers_id,],
|
||||
queue='embeddings')
|
||||
current_app.logger.info(f'Embedding creation started for tenant {tenant_id}, '
|
||||
f'Document Version {doc_vers_id}. '
|
||||
f'Embedding creation task: {task.id}')
|
||||
@@ -228,3 +237,113 @@ def validate_file_type(extension):
|
||||
if extension not in current_app.config['SUPPORTED_FILE_TYPES']:
|
||||
raise EveAIUnsupportedFileType(f"Filetype {extension} is currently not supported. "
|
||||
f"Supported filetypes: {', '.join(current_app.config['SUPPORTED_FILE_TYPES'])}")
|
||||
|
||||
|
||||
def get_filename_from_url(url):
    """Derive a document filename from a URL.

    Uses the last segment of the URL path; an empty segment (bare domain or
    trailing slash) falls back to 'index'. A '.html' suffix is appended
    unless the name already ends with it.
    """
    last_segment = urlparse(url).path.rsplit('/', 1)[-1]
    name = last_segment or 'index'
    return name if name.endswith('.html') else name + '.html'
|
||||
|
||||
|
||||
def get_documents_list(page, per_page):
    """Return a pagination object over all Documents, newest first."""
    return (Document.query
            .order_by(desc(Document.created_at))
            .paginate(page=page, per_page=per_page, error_out=False))
|
||||
|
||||
|
||||
def edit_document(document_id, name, valid_from, valid_to):
    """Update a Document's name and validity window, then persist it.

    Returns (document, None) on success or (None, error_message) when the
    commit fails; the session is rolled back on failure. 404s when the
    document id is unknown.
    """
    document = Document.query.get_or_404(document_id)
    document.name = name
    document.valid_from = valid_from
    document.valid_to = valid_to
    update_logging_information(document, dt.now(tz.utc))

    try:
        db.session.add(document)
        db.session.commit()
    except SQLAlchemyError as exc:
        db.session.rollback()
        return None, str(exc)
    return document, None
||||
|
||||
|
||||
def edit_document_version(version_id, user_context):
    """Update the user context on a DocumentVersion and persist it.

    Returns (version, None) on success or (None, error_message) when the
    commit fails; the session is rolled back on failure. 404s when the
    version id is unknown.
    """
    version = DocumentVersion.query.get_or_404(version_id)
    version.user_context = user_context
    update_logging_information(version, dt.now(tz.utc))

    try:
        db.session.add(version)
        db.session.commit()
    except SQLAlchemyError as exc:
        db.session.rollback()
        return None, str(exc)
    return version, None
|
||||
|
||||
|
||||
def refresh_document_with_info(doc_id, api_input):
    """Create a fresh DocumentVersion for a URL-backed document, re-download
    its content, and kick off embedding creation.

    :param doc_id: id of the Document to refresh (404s if unknown).
    :param api_input: dict of overrides; 'language', 'user_context' and
        'user_metadata' default to the latest version's values.
    :returns: (new_version, task_id) on success, or (None, error_message)
        when the document has no URL or the DB commit fails.
    """
    doc = Document.query.get_or_404(doc_id)
    # Latest version carries the URL and default settings.
    # NOTE(review): .first() returns None for a document with no versions —
    # the attribute access below would then raise; confirm callers guarantee
    # at least one version exists.
    old_doc_vers = DocumentVersion.query.filter_by(doc_id=doc_id).order_by(desc(DocumentVersion.id)).first()

    if not old_doc_vers.url:
        return None, "This document has no URL. Only documents with a URL can be refreshed."

    new_doc_vers = create_version_for_document(
        doc,
        old_doc_vers.url,
        api_input.get('language', old_doc_vers.language),
        api_input.get('user_context', old_doc_vers.user_context),
        api_input.get('user_metadata', old_doc_vers.user_metadata)
    )

    # NOTE(review): create_version_for_document appears to stamp audit fields
    # already; this re-stamps the same timestamps — confirm if intentional.
    set_logging_information(new_doc_vers, dt.now(tz.utc))

    try:
        db.session.add(new_doc_vers)
        db.session.commit()
    except SQLAlchemyError as e:
        db.session.rollback()
        return None, str(e)

    # Probe the Content-Type first (HEAD) to derive the file extension.
    response = requests.head(old_doc_vers.url, allow_redirects=True)
    content_type = response.headers.get('Content-Type', '').split(';')[0]
    extension = get_extension_from_content_type(content_type)

    # Then fetch the actual content; raises on HTTP errors.
    response = requests.get(old_doc_vers.url)
    response.raise_for_status()
    file_content = response.content

    upload_file_for_version(new_doc_vers, file_content, extension, doc.tenant_id)

    # Queue embedding creation on the dedicated 'embeddings' queue.
    task = current_celery.send_task('create_embeddings', args=[doc.tenant_id, new_doc_vers.id,], queue='embeddings')
    current_app.logger.info(f'Embedding creation started for document {doc_id} on version {new_doc_vers.id} '
                            f'with task id: {task.id}.')

    return new_doc_vers, task.id
|
||||
|
||||
|
||||
# Update the existing refresh_document function to use the new refresh_document_with_info
|
||||
def refresh_document(doc_id):
    """Refresh a document by re-fetching its URL, carrying over the latest
    version's language, user context and metadata unchanged.

    Thin wrapper around refresh_document_with_info; returns its result.
    """
    current_app.logger.info(f'Refreshing document {doc_id}')
    # 404s early when the document id is unknown.
    doc = Document.query.get_or_404(doc_id)
    latest = DocumentVersion.query.filter_by(doc_id=doc_id).order_by(desc(DocumentVersion.id)).first()

    carried_over = {
        'language': latest.language,
        'user_context': latest.user_context,
        'user_metadata': latest.user_metadata,
    }

    return refresh_document_with_info(doc_id, carried_over)
|
||||
|
||||
|
||||
# Function triggered when a document_version is created or updated
|
||||
def mark_tenant_storage_dirty(tenant_id):
    """Flag a tenant so its storage usage gets recomputed.

    Triggered whenever a DocumentVersion is created or updated.

    :param tenant_id: id of the tenant whose storage figures are now stale.
    """
    tenant = db.session.query(Tenant).filter_by(id=tenant_id).first()
    if tenant is None:
        # Defensive: .first() returns None for an unknown tenant id; the
        # previous code raised AttributeError here.
        current_app.logger.warning(f'mark_tenant_storage_dirty: tenant {tenant_id} not found')
        return
    tenant.storage_dirty = True
    db.session.commit()
|
||||
|
||||
@@ -34,10 +34,10 @@ class EveAIUnsupportedFileType(EveAIException):
|
||||
super().__init__(message, status_code, payload)
|
||||
|
||||
|
||||
class EveAIYoutubeError(EveAIException):
|
||||
"""Raised when adding a Youtube document fails"""
|
||||
class EveAINoLicenseForTenant(EveAIException):
|
||||
"""Raised when no active license for a tenant is provided"""
|
||||
|
||||
def __init__(self, message="Youtube document creation failed", status_code=400, payload=None):
|
||||
def __init__(self, message="No license for tenant found", status_code=400, payload=None):
|
||||
super().__init__(message, status_code, payload)
|
||||
|
||||
# Add more custom exceptions as needed
|
||||
|
||||
|
||||
@@ -50,13 +50,11 @@ class MinioClient:
|
||||
self.client.put_object(
|
||||
bucket_name, object_name, io.BytesIO(file_data), len(file_data)
|
||||
)
|
||||
return True
|
||||
return bucket_name, object_name, len(file_data)
|
||||
except S3Error as err:
|
||||
raise Exception(f"Error occurred while uploading file: {err}")
|
||||
|
||||
def download_document_file(self, tenant_id, document_id, language, version_id, filename):
|
||||
bucket_name = self.generate_bucket_name(tenant_id)
|
||||
object_name = self.generate_object_name(document_id, language, version_id, filename)
|
||||
def download_document_file(self, tenant_id, bucket_name, object_name):
|
||||
try:
|
||||
response = self.client.get_object(bucket_name, object_name)
|
||||
return response.read()
|
||||
|
||||
@@ -5,14 +5,19 @@ from flask import current_app
|
||||
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
|
||||
from langchain_anthropic import ChatAnthropic
|
||||
from langchain_core.pydantic_v1 import BaseModel, Field
|
||||
from langchain.prompts import ChatPromptTemplate
|
||||
import ast
|
||||
from typing import List
|
||||
from typing import List, Any, Iterator
|
||||
from collections.abc import MutableMapping
|
||||
from openai import OpenAI
|
||||
# from groq import Groq
|
||||
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL
|
||||
from portkey_ai.langchain.portkey_langchain_callback_handler import LangchainCallbackHandler
|
||||
|
||||
from common.langchain.llm_metrics_handler import LLMMetricsHandler
|
||||
from common.langchain.tracked_openai_embeddings import TrackedOpenAIEmbeddings
|
||||
from common.langchain.tracked_transcribe import tracked_transcribe
|
||||
from common.models.document import EmbeddingSmallOpenAI, EmbeddingLargeOpenAI
|
||||
from common.models.user import Tenant
|
||||
from config.model_config import MODEL_CONFIG
|
||||
from common.utils.business_event_context import current_event
|
||||
|
||||
|
||||
class CitedAnswer(BaseModel):
|
||||
@@ -36,180 +41,192 @@ def set_language_prompt_template(cls, language_prompt):
|
||||
cls.__doc__ = language_prompt
|
||||
|
||||
|
||||
class ModelVariables(MutableMapping):
|
||||
def __init__(self, tenant: Tenant):
|
||||
self.tenant = tenant
|
||||
self._variables = self._initialize_variables()
|
||||
self._embedding_model = None
|
||||
self._llm = None
|
||||
self._llm_no_rag = None
|
||||
self._transcription_client = None
|
||||
self._prompt_templates = {}
|
||||
self._embedding_db_model = None
|
||||
self.llm_metrics_handler = LLMMetricsHandler()
|
||||
self._transcription_client = None
|
||||
|
||||
    def _initialize_variables(self):
        """Assemble the tenant-derived configuration dict.

        Everything knowable from the Tenant row and app config is resolved
        eagerly here; model clients themselves are lazy-loaded elsewhere.
        """
        variables = {}

        # We initialize the variables that are available knowing the tenant. For the other, we will apply 'lazy loading'
        variables['k'] = self.tenant.es_k or 5
        variables['similarity_threshold'] = self.tenant.es_similarity_threshold or 0.7
        variables['RAG_temperature'] = self.tenant.chat_RAG_temperature or 0.3
        variables['no_RAG_temperature'] = self.tenant.chat_no_RAG_temperature or 0.5
        variables['embed_tuning'] = self.tenant.embed_tuning or False
        variables['rag_tuning'] = self.tenant.rag_tuning or False
        variables['rag_context'] = self.tenant.rag_context or " "

        # Set HTML Chunking Variables
        variables['html_tags'] = self.tenant.html_tags
        variables['html_end_tags'] = self.tenant.html_end_tags
        variables['html_included_elements'] = self.tenant.html_included_elements
        variables['html_excluded_elements'] = self.tenant.html_excluded_elements
        variables['html_excluded_classes'] = self.tenant.html_excluded_classes

        # Set Chunk Size variables
        variables['min_chunk_size'] = self.tenant.min_chunk_size
        variables['max_chunk_size'] = self.tenant.max_chunk_size

        # Set model providers
        # Tenant stores models as '<provider>.<model>'; split once from the right.
        variables['embedding_provider'], variables['embedding_model'] = self.tenant.embedding_model.rsplit('.', 1)
        variables['llm_provider'], variables['llm_model'] = self.tenant.llm_model.rsplit('.', 1)
        variables["templates"] = current_app.config['PROMPT_TEMPLATES'][(f"{variables['llm_provider']}."
                                                                         f"{variables['llm_model']}")]
        current_app.logger.info(f"Loaded prompt templates: \n")
        current_app.logger.info(f"{variables['templates']}")

        # Set model-specific configurations
        model_config = MODEL_CONFIG.get(variables['llm_provider'], {}).get(variables['llm_model'], {})
        variables.update(model_config)

        variables['annotation_chunk_length'] = current_app.config['ANNOTATION_TEXT_CHUNK_LENGTH'][self.tenant.llm_model]

        # NOTE(review): 'tool_calling_supported' is expected to come from
        # MODEL_CONFIG above; a model entry without it raises KeyError here —
        # confirm every configured model defines it.
        if variables['tool_calling_supported']:
            variables['cited_answer_cls'] = CitedAnswer

        variables['max_compression_duration'] = current_app.config['MAX_COMPRESSION_DURATION']
        variables['max_transcription_duration'] = current_app.config['MAX_TRANSCRIPTION_DURATION']
        variables['compression_cpu_limit'] = current_app.config['COMPRESSION_CPU_LIMIT']
        variables['compression_process_delay'] = current_app.config['COMPRESSION_PROCESS_DELAY']

        return variables
|
||||
|
||||
@property
|
||||
def embedding_model(self):
|
||||
api_key = os.getenv('OPENAI_API_KEY')
|
||||
model = self._variables['embedding_model']
|
||||
self._embedding_model = TrackedOpenAIEmbeddings(api_key=api_key,
|
||||
model=model,
|
||||
)
|
||||
self._embedding_db_model = EmbeddingSmallOpenAI \
|
||||
if model == 'text-embedding-3-small' \
|
||||
else EmbeddingLargeOpenAI
|
||||
|
||||
return self._embedding_model
|
||||
|
||||
@property
|
||||
def llm(self):
|
||||
api_key = self.get_api_key_for_llm()
|
||||
self._llm = ChatOpenAI(api_key=api_key,
|
||||
model=self._variables['llm_model'],
|
||||
temperature=self._variables['RAG_temperature'],
|
||||
callbacks=[self.llm_metrics_handler])
|
||||
return self._llm
|
||||
|
||||
@property
|
||||
def llm_no_rag(self):
|
||||
api_key = self.get_api_key_for_llm()
|
||||
self._llm_no_rag = ChatOpenAI(api_key=api_key,
|
||||
model=self._variables['llm_model'],
|
||||
temperature=self._variables['RAG_temperature'],
|
||||
callbacks=[self.llm_metrics_handler])
|
||||
return self._llm_no_rag
|
||||
|
||||
def get_api_key_for_llm(self):
|
||||
if self._variables['llm_provider'] == 'openai':
|
||||
api_key = os.getenv('OPENAI_API_KEY')
|
||||
else: # self._variables['llm_provider'] == 'anthropic'
|
||||
api_key = os.getenv('ANTHROPIC_API_KEY')
|
||||
|
||||
return api_key
|
||||
|
||||
@property
|
||||
def transcription_client(self):
|
||||
api_key = os.getenv('OPENAI_API_KEY')
|
||||
self._transcription_client = OpenAI(api_key=api_key, )
|
||||
self._variables['transcription_model'] = 'whisper-1'
|
||||
return self._transcription_client
|
||||
|
||||
def transcribe(self, *args, **kwargs):
|
||||
return tracked_transcribe(self._transcription_client, *args, **kwargs)
|
||||
|
||||
@property
|
||||
def embedding_db_model(self):
|
||||
if self._embedding_db_model is None:
|
||||
self._embedding_db_model = self.get_embedding_db_model()
|
||||
return self._embedding_db_model
|
||||
|
||||
def get_embedding_db_model(self):
|
||||
current_app.logger.debug("In get_embedding_db_model")
|
||||
if self._embedding_db_model is None:
|
||||
self._embedding_db_model = EmbeddingSmallOpenAI \
|
||||
if self._variables['embedding_model'] == 'text-embedding-3-small' \
|
||||
else EmbeddingLargeOpenAI
|
||||
current_app.logger.debug(f"Embedding DB Model: {self._embedding_db_model}")
|
||||
return self._embedding_db_model
|
||||
|
||||
def get_prompt_template(self, template_name: str) -> str:
|
||||
current_app.logger.info(f"Getting prompt template for {template_name}")
|
||||
if template_name not in self._prompt_templates:
|
||||
self._prompt_templates[template_name] = self._load_prompt_template(template_name)
|
||||
return self._prompt_templates[template_name]
|
||||
|
||||
def _load_prompt_template(self, template_name: str) -> str:
|
||||
# In the future, this method will make an API call to Portkey
|
||||
# For now, we'll simulate it with a placeholder implementation
|
||||
# You can replace this with your current prompt loading logic
|
||||
return self._variables['templates'][template_name]
|
||||
|
||||
def __getitem__(self, key: str) -> Any:
|
||||
current_app.logger.debug(f"ModelVariables: Getting {key}")
|
||||
# Support older template names (suffix = _template)
|
||||
if key.endswith('_template'):
|
||||
key = key[:-len('_template')]
|
||||
current_app.logger.debug(f"ModelVariables: Getting modified {key}")
|
||||
if key == 'embedding_model':
|
||||
return self.embedding_model
|
||||
elif key == 'embedding_db_model':
|
||||
return self.embedding_db_model
|
||||
elif key == 'llm':
|
||||
return self.llm
|
||||
elif key == 'llm_no_rag':
|
||||
return self.llm_no_rag
|
||||
elif key == 'transcription_client':
|
||||
return self.transcription_client
|
||||
elif key in self._variables.get('prompt_templates', []):
|
||||
return self.get_prompt_template(key)
|
||||
return self._variables.get(key)
|
||||
|
||||
def __setitem__(self, key: str, value: Any) -> None:
|
||||
self._variables[key] = value
|
||||
|
||||
def __delitem__(self, key: str) -> None:
|
||||
del self._variables[key]
|
||||
|
||||
def __iter__(self) -> Iterator[str]:
|
||||
return iter(self._variables)
|
||||
|
||||
def __len__(self):
|
||||
return len(self._variables)
|
||||
|
||||
def get(self, key: str, default: Any = None) -> Any:
|
||||
return self.__getitem__(key) or default
|
||||
|
||||
def update(self, **kwargs) -> None:
|
||||
self._variables.update(kwargs)
|
||||
|
||||
def items(self):
|
||||
return self._variables.items()
|
||||
|
||||
def keys(self):
|
||||
return self._variables.keys()
|
||||
|
||||
def values(self):
|
||||
return self._variables.values()
|
||||
|
||||
|
||||
def select_model_variables(tenant):
|
||||
embedding_provider = tenant.embedding_model.rsplit('.', 1)[0]
|
||||
embedding_model = tenant.embedding_model.rsplit('.', 1)[1]
|
||||
|
||||
llm_provider = tenant.llm_model.rsplit('.', 1)[0]
|
||||
llm_model = tenant.llm_model.rsplit('.', 1)[1]
|
||||
|
||||
# Set model variables
|
||||
model_variables = {}
|
||||
if tenant.es_k:
|
||||
model_variables['k'] = tenant.es_k
|
||||
else:
|
||||
model_variables['k'] = 5
|
||||
|
||||
if tenant.es_similarity_threshold:
|
||||
model_variables['similarity_threshold'] = tenant.es_similarity_threshold
|
||||
else:
|
||||
model_variables['similarity_threshold'] = 0.7
|
||||
|
||||
if tenant.chat_RAG_temperature:
|
||||
model_variables['RAG_temperature'] = tenant.chat_RAG_temperature
|
||||
else:
|
||||
model_variables['RAG_temperature'] = 0.3
|
||||
|
||||
if tenant.chat_no_RAG_temperature:
|
||||
model_variables['no_RAG_temperature'] = tenant.chat_no_RAG_temperature
|
||||
else:
|
||||
model_variables['no_RAG_temperature'] = 0.5
|
||||
|
||||
# Set Tuning variables
|
||||
if tenant.embed_tuning:
|
||||
model_variables['embed_tuning'] = tenant.embed_tuning
|
||||
else:
|
||||
model_variables['embed_tuning'] = False
|
||||
|
||||
if tenant.rag_tuning:
|
||||
model_variables['rag_tuning'] = tenant.rag_tuning
|
||||
else:
|
||||
model_variables['rag_tuning'] = False
|
||||
|
||||
if tenant.rag_context:
|
||||
model_variables['rag_context'] = tenant.rag_context
|
||||
else:
|
||||
model_variables['rag_context'] = " "
|
||||
|
||||
# Set HTML Chunking Variables
|
||||
model_variables['html_tags'] = tenant.html_tags
|
||||
model_variables['html_end_tags'] = tenant.html_end_tags
|
||||
model_variables['html_included_elements'] = tenant.html_included_elements
|
||||
model_variables['html_excluded_elements'] = tenant.html_excluded_elements
|
||||
model_variables['html_excluded_classes'] = tenant.html_excluded_classes
|
||||
|
||||
# Set Chunk Size variables
|
||||
model_variables['min_chunk_size'] = tenant.min_chunk_size
|
||||
model_variables['max_chunk_size'] = tenant.max_chunk_size
|
||||
|
||||
environment = os.getenv('FLASK_ENV', 'development')
|
||||
portkey_metadata = {'tenant_id': str(tenant.id), 'environment': environment}
|
||||
|
||||
# Set Embedding variables
|
||||
match embedding_provider:
|
||||
case 'openai':
|
||||
portkey_headers = createHeaders(api_key=current_app.config.get('PORTKEY_API_KEY'),
|
||||
provider='openai',
|
||||
metadata=portkey_metadata)
|
||||
match embedding_model:
|
||||
case 'text-embedding-3-small':
|
||||
api_key = current_app.config.get('OPENAI_API_KEY')
|
||||
model_variables['embedding_model'] = OpenAIEmbeddings(api_key=api_key,
|
||||
model='text-embedding-3-small',
|
||||
base_url=PORTKEY_GATEWAY_URL,
|
||||
default_headers=portkey_headers
|
||||
)
|
||||
model_variables['embedding_db_model'] = EmbeddingSmallOpenAI
|
||||
case 'text-embedding-3-large':
|
||||
api_key = current_app.config.get('OPENAI_API_KEY')
|
||||
model_variables['embedding_model'] = OpenAIEmbeddings(api_key=api_key,
|
||||
model='text-embedding-3-large',
|
||||
base_url=PORTKEY_GATEWAY_URL,
|
||||
default_headers=portkey_headers
|
||||
)
|
||||
model_variables['embedding_db_model'] = EmbeddingLargeOpenAI
|
||||
case _:
|
||||
raise Exception(f'Error setting model variables for tenant {tenant.id} '
|
||||
f'error: Invalid embedding model')
|
||||
case _:
|
||||
raise Exception(f'Error setting model variables for tenant {tenant.id} '
|
||||
f'error: Invalid embedding provider')
|
||||
|
||||
# Set Chat model variables
|
||||
match llm_provider:
|
||||
case 'openai':
|
||||
portkey_headers = createHeaders(api_key=current_app.config.get('PORTKEY_API_KEY'),
|
||||
metadata=portkey_metadata,
|
||||
provider='openai')
|
||||
tool_calling_supported = False
|
||||
api_key = current_app.config.get('OPENAI_API_KEY')
|
||||
model_variables['llm'] = ChatOpenAI(api_key=api_key,
|
||||
model=llm_model,
|
||||
temperature=model_variables['RAG_temperature'],
|
||||
base_url=PORTKEY_GATEWAY_URL,
|
||||
default_headers=portkey_headers)
|
||||
model_variables['llm_no_rag'] = ChatOpenAI(api_key=api_key,
|
||||
model=llm_model,
|
||||
temperature=model_variables['no_RAG_temperature'],
|
||||
base_url=PORTKEY_GATEWAY_URL,
|
||||
default_headers=portkey_headers)
|
||||
tool_calling_supported = False
|
||||
match llm_model:
|
||||
case 'gpt-4o' | 'gpt-4o-mini':
|
||||
tool_calling_supported = True
|
||||
PDF_chunk_size = 10000
|
||||
PDF_chunk_overlap = 200
|
||||
PDF_min_chunk_size = 8000
|
||||
PDF_max_chunk_size = 12000
|
||||
case _:
|
||||
raise Exception(f'Error setting model variables for tenant {tenant.id} '
|
||||
f'error: Invalid chat model')
|
||||
case 'anthropic':
|
||||
api_key = current_app.config.get('ANTHROPIC_API_KEY')
|
||||
# Anthropic does not have the same 'generic' model names as OpenAI
|
||||
llm_model_ext = current_app.config.get('ANTHROPIC_LLM_VERSIONS').get(llm_model)
|
||||
model_variables['llm'] = ChatAnthropic(api_key=api_key,
|
||||
model=llm_model_ext,
|
||||
temperature=model_variables['RAG_temperature'])
|
||||
model_variables['llm_no_rag'] = ChatAnthropic(api_key=api_key,
|
||||
model=llm_model_ext,
|
||||
temperature=model_variables['RAG_temperature'])
|
||||
tool_calling_supported = True
|
||||
PDF_chunk_size = 10000
|
||||
PDF_chunk_overlap = 200
|
||||
PDF_min_chunk_size = 8000
|
||||
PDF_max_chunk_size = 12000
|
||||
case _:
|
||||
raise Exception(f'Error setting model variables for tenant {tenant.id} '
|
||||
f'error: Invalid chat provider')
|
||||
|
||||
model_variables['PDF_chunk_size'] = PDF_chunk_size
|
||||
model_variables['PDF_chunk_overlap'] = PDF_chunk_overlap
|
||||
model_variables['PDF_min_chunk_size'] = PDF_min_chunk_size
|
||||
model_variables['PDF_max_chunk_size'] = PDF_max_chunk_size
|
||||
|
||||
if tool_calling_supported:
|
||||
model_variables['cited_answer_cls'] = CitedAnswer
|
||||
|
||||
templates = current_app.config['PROMPT_TEMPLATES'][f'{llm_provider}.{llm_model}']
|
||||
model_variables['summary_template'] = templates['summary']
|
||||
model_variables['rag_template'] = templates['rag']
|
||||
model_variables['history_template'] = templates['history']
|
||||
model_variables['encyclopedia_template'] = templates['encyclopedia']
|
||||
model_variables['transcript_template'] = templates['transcript']
|
||||
model_variables['html_parse_template'] = templates['html_parse']
|
||||
model_variables['pdf_parse_template'] = templates['pdf_parse']
|
||||
|
||||
model_variables['annotation_chunk_length'] = current_app.config['ANNOTATION_TEXT_CHUNK_LENGTH'][tenant.llm_model]
|
||||
|
||||
# Transcription Client Variables.
|
||||
# Using Groq
|
||||
# api_key = current_app.config.get('GROQ_API_KEY')
|
||||
# model_variables['transcription_client'] = Groq(api_key=api_key)
|
||||
# model_variables['transcription_model'] = 'whisper-large-v3'
|
||||
|
||||
# Using OpenAI for transcriptions
|
||||
portkey_metadata = {'tenant_id': str(tenant.id)}
|
||||
portkey_headers = createHeaders(api_key=current_app.config.get('PORTKEY_API_KEY'),
|
||||
metadata=portkey_metadata,
|
||||
provider='openai'
|
||||
)
|
||||
api_key = current_app.config.get('OPENAI_API_KEY')
|
||||
model_variables['transcription_client'] = OpenAI(api_key=api_key,
|
||||
base_url=PORTKEY_GATEWAY_URL,
|
||||
default_headers=portkey_headers)
|
||||
model_variables['transcription_model'] = 'whisper-1'
|
||||
|
||||
model_variables = ModelVariables(tenant=tenant)
|
||||
return model_variables
|
||||
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@ def prefixed_url_for(endpoint, **values):
|
||||
prefix = request.headers.get('X-Forwarded-Prefix', '')
|
||||
scheme = request.headers.get('X-Forwarded-Proto', request.scheme)
|
||||
host = request.headers.get('Host', request.host)
|
||||
current_app.logger.debug(f'prefix: {prefix}, scheme: {scheme}, host: {host}')
|
||||
|
||||
external = values.pop('_external', False)
|
||||
generated_url = url_for(endpoint, **values)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from flask import flash
|
||||
from flask import flash, current_app
|
||||
|
||||
|
||||
def prepare_table(model_objects, column_names):
|
||||
@@ -44,7 +44,8 @@ def form_validation_failed(request, form):
|
||||
for fieldName, errorMessages in form.errors.items():
|
||||
for err in errorMessages:
|
||||
flash(f"Error in {fieldName}: {err}", 'danger')
|
||||
current_app.logger.debug(f"Error in {fieldName}: {err}")
|
||||
|
||||
|
||||
def form_to_dict(form):
|
||||
return {field.name: field.data for field in form if field.name != 'csrf_token' and hasattr(field, 'data')}
|
||||
return {field.name: field.data for field in form if field.name != 'csrf_token' and hasattr(field, 'data')}
|
||||
|
||||
@@ -59,6 +59,9 @@ class Config(object):
|
||||
# supported languages
|
||||
SUPPORTED_LANGUAGES = ['en', 'fr', 'nl', 'de', 'es']
|
||||
|
||||
# supported currencies
|
||||
SUPPORTED_CURRENCIES = ['€', '$']
|
||||
|
||||
# supported LLMs
|
||||
SUPPORTED_EMBEDDINGS = ['openai.text-embedding-3-small', 'openai.text-embedding-3-large', 'mistral.mistral-embed']
|
||||
SUPPORTED_LLMS = ['openai.gpt-4o', 'anthropic.claude-3-5-sonnet', 'openai.gpt-4o-mini']
|
||||
@@ -107,6 +110,7 @@ class Config(object):
|
||||
|
||||
# JWT settings
|
||||
JWT_SECRET_KEY = environ.get('JWT_SECRET_KEY')
|
||||
JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=1) # Set token expiry to 1 hour
|
||||
|
||||
# API Encryption
|
||||
API_ENCRYPTION_KEY = environ.get('API_ENCRYPTION_KEY')
|
||||
@@ -136,9 +140,24 @@ class Config(object):
|
||||
MAIL_PASSWORD = environ.get('MAIL_PASSWORD')
|
||||
MAIL_DEFAULT_SENDER = ('eveAI Admin', MAIL_USERNAME)
|
||||
|
||||
# Langsmith settings
|
||||
LANGCHAIN_TRACING_V2 = True
|
||||
LANGCHAIN_ENDPOINT = 'https://api.smith.langchain.com'
|
||||
LANGCHAIN_PROJECT = "eveai"
|
||||
|
||||
|
||||
SUPPORTED_FILE_TYPES = ['pdf', 'html', 'md', 'txt', 'mp3', 'mp4', 'ogg', 'srt']
|
||||
|
||||
TENANT_TYPES = ['Active', 'Demo', 'Inactive', 'Test']
|
||||
|
||||
# The maximum number of seconds allowed for audio compression (to save resources)
|
||||
MAX_COMPRESSION_DURATION = 60*10 # 10 minutes
|
||||
# The maximum number of seconds allowed for transcribing audio
|
||||
MAX_TRANSCRIPTION_DURATION = 60*10 # 10 minutes
|
||||
# Maximum CPU usage for a compression task
|
||||
COMPRESSION_CPU_LIMIT = 50
|
||||
# Delay between compressing chunks in seconds
|
||||
COMPRESSION_PROCESS_DELAY = 1
|
||||
|
||||
|
||||
class DevConfig(Config):
|
||||
|
||||
@@ -1,13 +0,0 @@
|
||||
{
|
||||
"type": "service_account",
|
||||
"project_id": "eveai-420711",
|
||||
"private_key_id": "e666408e75793321a6134243628346722a71b3a6",
|
||||
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCaGTXCWpq08YD1\nOW4z+gncOlB7T/EIiEwsZgMp6pyUrNioGfiI9YN+uVR0nsUSmFf1YyerRgX7RqD5\nRc7T/OuX8iIvmloK3g7CaFezcVrjnBKcg/QsjDAt/OO3DTk4vykDlh/Kqxx73Jdv\nFH9YSV2H7ToWqIE8CTDnqe8vQS7Bq995c9fPlues31MgndRFg3CFkH0ldfZ4aGm3\n1RnBDyC+9SPQW9e7CJgNN9PWTmOT51Zyy5IRuV5OWePMQaGLVmCo5zNc/EHZEVRu\n1hxJPHL3NNmkYDY8tye8uHgjsAkv8QuwIuUSqnqjoo1/Yg+P0+9GCpePOAJRNxJS\n0YpDFWc5AgMBAAECggEACIU4/hG+bh97BD7JriFhfDDT6bg7g+pCs/hsAlxQ42jv\nOH7pyWuHJXGf5Cwx31usZAq4fcrgYnVpnyl8odIL628y9AjdI66wMuWhZnBFGJgK\nRhHcZWjW8nlXf0lBjwwFe4edzbn1AuWT5fYZ2HWDW2mthY/e8sUwqWPcWsjdifhz\nNR7V+Ia47McKXYgEKjyEObSP1NUOW24zH0DgxS52YPMwa1FoHn6+9Pr8P3TsTSO6\nh6f8tnd81DGl1UH4F5Bj/MHsQXyAMJbu44S4+rZ4Qlk+5xPp9hfCNpxWaHLIkJCg\nYXnC8UAjjyXiqyK0U0RjJf8TS1FxUI4iPepLNqp/pQKBgQDTicZnWFXmCFTnycWp\n66P3Yx0yvlKdUdfnoD/n9NdmUA3TZUlEVfb0IOm7ZFubF/zDTH87XrRiD/NVDbr8\n6bdhA1DXzraxhbfD36Hca6K74Ba4aYJsSWWwI0hL3FDSsv8c7qAIaUF2iwuHb7Y0\nRDcvZqowtQobcQC8cHLc/bI/ZwKBgQC6fMeGaU+lP6jhp9Nb/3Gz5Z1zzCu34IOo\nlgpTNZsowRKYLtjHifrEFi3XRxPKz5thMuJFniof5U4WoMYtRXy+PbgySvBpCia2\nXty05XssnLLMvLpYU5sbQvmOTe20zaIzLohRvvmqrydYIKu62NTubNeuD1L+Zr0q\nz1P5/wUgXwKBgQCW9MrRFQi3j1qHzkVwbOglsmUzwP3TpoQclw8DyIWuTZKQOMeA\nLJh+vr4NLCDzHLsT45MoGv0+vYM4PwQhV+e1I1idqLZXGMV60iv/0A/hYpjUIPch\nr38RoxwEhsRml7XWP7OUTQiaP7+Kdv3fbo6zFOB+wbLkwk90KgrOCX0aIQKBgFeK\n7esmErJjMPdFXk3om0q09nX+mWNHLOb+EDjBiGXYRM9V5oO9PQ/BzaEqh5sEXE+D\noH7H4cR5U3AB5yYnYYi41ngdf7//eO7Rl1AADhOCN9kum1eNX9mrVhU8deMTSRo3\ntNyTBwbeFF0lcRhUY5jNVW4rWW19cz3ed/B6i8CHAoGBAJ/l5rkV74Z5hg6BWNfQ\nYAg/4PLZmjnXIy5QdnWc/PYgbhn5+iVUcL9fSofFzJM1rjFnNcs3S90MGeOmfmo4\nM1WtcQFQbsCGt6+G5uEL/nf74mKUGpOqEM/XSkZ3inweWiDk3LK3iYfXCMBFouIr\n80IlzI1yMf7MVmWn3e1zPjCA\n-----END PRIVATE KEY-----\n",
|
||||
"client_email": "eveai-349@eveai-420711.iam.gserviceaccount.com",
|
||||
"client_id": "109927035346319712442",
|
||||
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
|
||||
"token_uri": "https://oauth2.googleapis.com/token",
|
||||
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
|
||||
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/eveai-349%40eveai-420711.iam.gserviceaccount.com",
|
||||
"universe_domain": "googleapis.com"
|
||||
}
|
||||
@@ -12,7 +12,12 @@ env = os.environ.get('FLASK_ENV', 'development')
|
||||
class CustomLogRecord(logging.LogRecord):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.component = os.environ.get('COMPONENT_NAME', 'eveai_app') # Set default component value here
|
||||
self.component = os.environ.get('COMPONENT_NAME', 'eveai_app')
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
if name not in {'event_type', 'tenant_id', 'trace_id', 'span_id', 'span_name', 'parent_span_id',
|
||||
'document_version_id', 'chat_session_id', 'interaction_id', 'environment'}:
|
||||
super().__setattr__(name, value)
|
||||
|
||||
|
||||
def custom_log_record_factory(*args, **kwargs):
|
||||
@@ -68,6 +73,22 @@ LOGGING = {
|
||||
'backupCount': 10,
|
||||
'formatter': 'standard',
|
||||
},
|
||||
'file_beat': {
|
||||
'level': 'DEBUG',
|
||||
'class': 'logging.handlers.RotatingFileHandler',
|
||||
'filename': 'logs/eveai_beat.log',
|
||||
'maxBytes': 1024 * 1024 * 5, # 5MB
|
||||
'backupCount': 10,
|
||||
'formatter': 'standard',
|
||||
},
|
||||
'file_entitlements': {
|
||||
'level': 'DEBUG',
|
||||
'class': 'logging.handlers.RotatingFileHandler',
|
||||
'filename': 'logs/eveai_entitlements.log',
|
||||
'maxBytes': 1024 * 1024 * 5, # 5MB
|
||||
'backupCount': 10,
|
||||
'formatter': 'standard',
|
||||
},
|
||||
'file_sqlalchemy': {
|
||||
'level': 'DEBUG',
|
||||
'class': 'logging.handlers.RotatingFileHandler',
|
||||
@@ -108,6 +129,14 @@ LOGGING = {
|
||||
'backupCount': 10,
|
||||
'formatter': 'standard',
|
||||
},
|
||||
'file_business_events': {
|
||||
'level': 'INFO',
|
||||
'class': 'logging.handlers.RotatingFileHandler',
|
||||
'filename': 'logs/business_events.log',
|
||||
'maxBytes': 1024 * 1024 * 5, # 5MB
|
||||
'backupCount': 10,
|
||||
'formatter': 'standard',
|
||||
},
|
||||
'console': {
|
||||
'class': 'logging.StreamHandler',
|
||||
'level': 'DEBUG',
|
||||
@@ -159,6 +188,16 @@ LOGGING = {
|
||||
'level': 'DEBUG',
|
||||
'propagate': False
|
||||
},
|
||||
'eveai_beat': { # logger for the eveai_beat
|
||||
'handlers': ['file_beat', 'graylog', ] if env == 'production' else ['file_beat', ],
|
||||
'level': 'DEBUG',
|
||||
'propagate': False
|
||||
},
|
||||
'eveai_entitlements': { # logger for the eveai_entitlements
|
||||
'handlers': ['file_entitlements', 'graylog', ] if env == 'production' else ['file_entitlements', ],
|
||||
'level': 'DEBUG',
|
||||
'propagate': False
|
||||
},
|
||||
'sqlalchemy.engine': { # logger for the sqlalchemy
|
||||
'handlers': ['file_sqlalchemy', 'graylog', ] if env == 'production' else ['file_sqlalchemy', ],
|
||||
'level': 'DEBUG',
|
||||
@@ -184,6 +223,11 @@ LOGGING = {
|
||||
'level': 'DEBUG',
|
||||
'propagate': False
|
||||
},
|
||||
'business_events': {
|
||||
'handlers': ['file_business_events', 'graylog'],
|
||||
'level': 'DEBUG',
|
||||
'propagate': False
|
||||
},
|
||||
'': { # root logger
|
||||
'handlers': ['console'],
|
||||
'level': 'WARNING', # Set higher level for root to minimize noise
|
||||
|
||||
41
config/model_config.py
Normal file
41
config/model_config.py
Normal file
@@ -0,0 +1,41 @@
|
||||
MODEL_CONFIG = {
|
||||
"openai": {
|
||||
"gpt-4o": {
|
||||
"tool_calling_supported": True,
|
||||
"processing_chunk_size": 10000,
|
||||
"processing_chunk_overlap": 200,
|
||||
"processing_min_chunk_size": 8000,
|
||||
"processing_max_chunk_size": 12000,
|
||||
"prompt_templates": [
|
||||
"summary", "rag", "history", "encyclopedia",
|
||||
"transcript", "html_parse", "pdf_parse"
|
||||
]
|
||||
},
|
||||
"gpt-4o-mini": {
|
||||
"tool_calling_supported": True,
|
||||
"processing_chunk_size": 10000,
|
||||
"processing_chunk_overlap": 200,
|
||||
"processing_min_chunk_size": 8000,
|
||||
"processing_max_chunk_size": 12000,
|
||||
"prompt_templates": [
|
||||
"summary", "rag", "history", "encyclopedia",
|
||||
"transcript", "html_parse", "pdf_parse"
|
||||
]
|
||||
},
|
||||
# Add other OpenAI models here
|
||||
},
|
||||
"anthropic": {
|
||||
"claude-3-5-sonnet": {
|
||||
"tool_calling_supported": True,
|
||||
"processing_chunk_size": 10000,
|
||||
"processing_chunk_overlap": 200,
|
||||
"processing_min_chunk_size": 8000,
|
||||
"processing_max_chunk_size": 12000,
|
||||
"prompt_templates": [
|
||||
"summary", "rag", "history", "encyclopedia",
|
||||
"transcript", "html_parse", "pdf_parse"
|
||||
]
|
||||
},
|
||||
# Add other Anthropic models here
|
||||
},
|
||||
}
|
||||
@@ -65,11 +65,13 @@ encyclopedia: |
|
||||
|
||||
transcript: |
|
||||
You are a top administrative assistant specialized in transforming given transcriptions into markdown formatted files. The generated files will be used to generate embeddings in a RAG-system. The transcriptions originate from podcast, videos and similar material.
|
||||
You may receive information in different chunks. If you're not receiving the first chunk, you'll get the last part of the previous chunk, including it's title in between triple $. Consider this last part and the title as the start of the new chunk.
|
||||
|
||||
|
||||
# Best practices and steps are:
|
||||
- Respect wordings and language(s) used in the transcription. Main language is {language}.
|
||||
- Sometimes, the transcript contains speech of several people participating in a conversation. Although these are not obvious from reading the file, try to detect when other people are speaking.
|
||||
- Divide the transcript into several logical parts. Ensure questions and their answers are in the same logical part.
|
||||
- Divide the transcript into several logical parts. Ensure questions and their answers are in the same logical part. Don't make logical parts too small. They should contain at least 7 or 8 sentences.
|
||||
- annotate the text to identify these logical parts using headings in {language}.
|
||||
- improve errors in the transcript given the context, but do not change the meaning and intentions of the transcription.
|
||||
|
||||
@@ -77,4 +79,6 @@ transcript: |
|
||||
|
||||
The transcript is between triple backquotes.
|
||||
|
||||
$$${previous_part}$$$
|
||||
|
||||
```{transcript}```
|
||||
@@ -141,7 +141,7 @@ if [ $# -eq 0 ]; then
|
||||
SERVICES=()
|
||||
while IFS= read -r line; do
|
||||
SERVICES+=("$line")
|
||||
done < <(yq e '.services | keys | .[]' compose_dev.yaml | grep -E '^(nginx|eveai_)')
|
||||
done < <(yq e '.services | keys | .[]' compose_dev.yaml | grep -E '^(nginx|eveai_|flower)')
|
||||
else
|
||||
SERVICES=("$@")
|
||||
fi
|
||||
@@ -158,7 +158,7 @@ docker buildx use eveai_builder
|
||||
|
||||
# Loop through services
|
||||
for SERVICE in "${SERVICES[@]}"; do
|
||||
if [[ "$SERVICE" == "nginx" || "$SERVICE" == eveai_* ]]; then
|
||||
if [[ "$SERVICE" == "nginx" || "$SERVICE" == eveai_* || "$SERVICE" == "flower" ]]; then
|
||||
if process_service "$SERVICE"; then
|
||||
echo "Successfully processed $SERVICE"
|
||||
else
|
||||
|
||||
@@ -22,6 +22,8 @@ x-common-variables: &common-variables
|
||||
MAIL_PASSWORD: '$$6xsWGbNtx$$CFMQZqc*'
|
||||
MAIL_SERVER: mail.flow-it.net
|
||||
MAIL_PORT: 465
|
||||
REDIS_URL: redis
|
||||
REDIS_PORT: '6379'
|
||||
OPENAI_API_KEY: 'sk-proj-8R0jWzwjL7PeoPyMhJTZT3BlbkFJLb6HfRB2Hr9cEVFWEhU7'
|
||||
GROQ_API_KEY: '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'
|
||||
ANTHROPIC_API_KEY: 'sk-ant-api03-c2TmkzbReeGhXBO5JxNH6BJNylRDonc9GmZd0eRbrvyekec2'
|
||||
@@ -32,6 +34,7 @@ x-common-variables: &common-variables
|
||||
MINIO_ACCESS_KEY: minioadmin
|
||||
MINIO_SECRET_KEY: minioadmin
|
||||
NGINX_SERVER_NAME: 'localhost http://macstudio.ask-eve-ai-local.com/'
|
||||
LANGCHAIN_API_KEY: "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"
|
||||
|
||||
|
||||
networks:
|
||||
@@ -96,12 +99,11 @@ services:
|
||||
minio:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:5001/health"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
# entrypoint: ["scripts/entrypoint.sh"]
|
||||
# command: ["scripts/start_eveai_app.sh"]
|
||||
test: ["CMD", "curl", "-f", "http://localhost:5001/healthz/ready"]
|
||||
interval: 30s
|
||||
timeout: 1s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
@@ -113,8 +115,6 @@ services:
|
||||
platforms:
|
||||
- linux/amd64
|
||||
- linux/arm64
|
||||
# ports:
|
||||
# - 5001:5001
|
||||
environment:
|
||||
<<: *common-variables
|
||||
COMPONENT_NAME: eveai_workers
|
||||
@@ -132,13 +132,6 @@ services:
|
||||
condition: service_healthy
|
||||
minio:
|
||||
condition: service_healthy
|
||||
# healthcheck:
|
||||
# test: [ "CMD", "curl", "-f", "http://localhost:5001/health" ]
|
||||
# interval: 10s
|
||||
# timeout: 5s
|
||||
# retries: 5
|
||||
# entrypoint: [ "sh", "-c", "scripts/entrypoint.sh" ]
|
||||
# command: [ "sh", "-c", "scripts/start_eveai_workers.sh" ]
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
@@ -168,12 +161,11 @@ services:
|
||||
redis:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://localhost:5002/health" ] # Adjust based on your health endpoint
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
# entrypoint: [ "sh", "-c", "scripts/entrypoint.sh" ]
|
||||
# command: ["sh", "-c", "scripts/start_eveai_chat.sh"]
|
||||
test: [ "CMD", "curl", "-f", "http://localhost:5002/healthz/ready" ] # Adjust based on your health endpoint
|
||||
interval: 30s
|
||||
timeout: 1s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
@@ -185,8 +177,6 @@ services:
|
||||
platforms:
|
||||
- linux/amd64
|
||||
- linux/arm64
|
||||
# ports:
|
||||
# - 5001:5001
|
||||
environment:
|
||||
<<: *common-variables
|
||||
COMPONENT_NAME: eveai_chat_workers
|
||||
@@ -202,16 +192,98 @@ services:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_healthy
|
||||
# healthcheck:
|
||||
# test: [ "CMD", "curl", "-f", "http://localhost:5001/health" ]
|
||||
# interval: 10s
|
||||
# timeout: 5s
|
||||
# retries: 5
|
||||
# entrypoint: [ "sh", "-c", "scripts/entrypoint.sh" ]
|
||||
# command: [ "sh", "-c", "scripts/start_eveai_chat_workers.sh" ]
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
eveai_api:
|
||||
image: josakola/eveai_api:latest
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: ./docker/eveai_api/Dockerfile
|
||||
platforms:
|
||||
- linux/amd64
|
||||
- linux/arm64
|
||||
ports:
|
||||
- 5003:5003
|
||||
environment:
|
||||
<<: *common-variables
|
||||
COMPONENT_NAME: eveai_api
|
||||
volumes:
|
||||
- ../eveai_api:/app/eveai_api
|
||||
- ../common:/app/common
|
||||
- ../config:/app/config
|
||||
- ../scripts:/app/scripts
|
||||
- ../patched_packages:/app/patched_packages
|
||||
- eveai_logs:/app/logs
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_healthy
|
||||
minio:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://localhost:5003/healthz/ready" ]
|
||||
interval: 30s
|
||||
timeout: 1s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
eveai_beat:
|
||||
image: josakola/eveai_beat:latest
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: ./docker/eveai_beat/Dockerfile
|
||||
platforms:
|
||||
- linux/amd64
|
||||
- linux/arm64
|
||||
environment:
|
||||
<<: *common-variables
|
||||
COMPONENT_NAME: eveai_beat
|
||||
volumes:
|
||||
- ../eveai_beat:/app/eveai_beat
|
||||
- ../common:/app/common
|
||||
- ../config:/app/config
|
||||
- ../scripts:/app/scripts
|
||||
- ../patched_packages:/app/patched_packages
|
||||
- eveai_logs:/app/logs
|
||||
depends_on:
|
||||
redis:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
eveai_entitlements:
|
||||
image: josakola/eveai_entitlements:latest
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: ./docker/eveai_entitlements/Dockerfile
|
||||
platforms:
|
||||
- linux/amd64
|
||||
- linux/arm64
|
||||
environment:
|
||||
<<: *common-variables
|
||||
COMPONENT_NAME: eveai_entitlements
|
||||
volumes:
|
||||
- ../eveai_entitlements:/app/eveai_entitlements
|
||||
- ../common:/app/common
|
||||
- ../config:/app/config
|
||||
- ../scripts:/app/scripts
|
||||
- ../patched_packages:/app/patched_packages
|
||||
- eveai_logs:/app/logs
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_healthy
|
||||
minio:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
|
||||
db:
|
||||
hostname: db
|
||||
image: ankane/pgvector
|
||||
@@ -248,6 +320,22 @@ services:
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
flower:
|
||||
image: josakola/flower:latest
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: ./docker/flower/Dockerfile
|
||||
environment:
|
||||
<<: *common-variables
|
||||
volumes:
|
||||
- ../scripts:/app/scripts
|
||||
ports:
|
||||
- "5555:5555"
|
||||
depends_on:
|
||||
- redis
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
minio:
|
||||
image: minio/minio
|
||||
ports:
|
||||
|
||||
@@ -21,11 +21,13 @@ x-common-variables: &common-variables
|
||||
MAIL_USERNAME: 'evie_admin@askeveai.com'
|
||||
MAIL_PASSWORD: 's5D%R#y^v!s&6Z^i0k&'
|
||||
MAIL_SERVER: mail.askeveai.com
|
||||
MAIL_PORT: 465
|
||||
MAIL_PORT: '465'
|
||||
REDIS_USER: eveai
|
||||
REDIS_PASS: 'jHliZwGD36sONgbm0fc6SOpzLbknqq4RNF8K'
|
||||
REDIS_URL: 8bciqc.stackhero-network.com
|
||||
REDIS_PORT: '9961'
|
||||
FLOWER_USER: 'Felucia'
|
||||
FLOWER_PASSWORD: 'Jungles'
|
||||
OPENAI_API_KEY: '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'
|
||||
GROQ_API_KEY: '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'
|
||||
ANTHROPIC_API_KEY: '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'
|
||||
@@ -38,6 +40,7 @@ x-common-variables: &common-variables
|
||||
MINIO_ACCESS_KEY: 04JKmQln8PQpyTmMiCPc
|
||||
MINIO_SECRET_KEY: 2PEZAD1nlpAmOyDV0TUTuJTQw1qVuYLF3A7GMs0D
|
||||
NGINX_SERVER_NAME: 'evie.askeveai.com mxz536.stackhero-network.com'
|
||||
LANGCHAIN_API_KEY: "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"
|
||||
|
||||
networks:
|
||||
eveai-network:
|
||||
@@ -53,10 +56,6 @@ services:
|
||||
environment:
|
||||
<<: *common-variables
|
||||
volumes:
|
||||
# - ../nginx:/etc/nginx
|
||||
# - ../nginx/sites-enabled:/etc/nginx/sites-enabled
|
||||
# - ../nginx/static:/etc/nginx/static
|
||||
# - ../nginx/public:/etc/nginx/public
|
||||
- eveai_logs:/var/log/nginx
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
@@ -81,7 +80,7 @@ services:
|
||||
volumes:
|
||||
- eveai_logs:/app/logs
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:5001/health"]
|
||||
test: ["CMD", "curl", "-f", "http://localhost:5001/healthz/ready"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
@@ -91,18 +90,11 @@ services:
|
||||
eveai_workers:
|
||||
platform: linux/amd64
|
||||
image: josakola/eveai_workers:latest
|
||||
# ports:
|
||||
# - 5001:5001
|
||||
environment:
|
||||
<<: *common-variables
|
||||
COMPONENT_NAME: eveai_workers
|
||||
volumes:
|
||||
- eveai_logs:/app/logs
|
||||
# healthcheck:
|
||||
# test: [ "CMD", "curl", "-f", "http://localhost:5001/health" ]
|
||||
# interval: 10s
|
||||
# timeout: 5s
|
||||
# retries: 5
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
@@ -117,7 +109,7 @@ services:
|
||||
volumes:
|
||||
- eveai_logs:/app/logs
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://localhost:5002/health" ] # Adjust based on your health endpoint
|
||||
test: [ "CMD", "curl", "-f", "http://localhost:5002/healthz/ready" ] # Adjust based on your health endpoint
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
@@ -127,28 +119,64 @@ services:
|
||||
eveai_chat_workers:
|
||||
platform: linux/amd64
|
||||
image: josakola/eveai_chat_workers:latest
|
||||
# ports:
|
||||
# - 5001:5001
|
||||
environment:
|
||||
<<: *common-variables
|
||||
COMPONENT_NAME: eveai_chat_workers
|
||||
volumes:
|
||||
- eveai_logs:/app/logs
|
||||
# healthcheck:
|
||||
# test: [ "CMD", "curl", "-f", "http://localhost:5001/health" ]
|
||||
# interval: 10s
|
||||
# timeout: 5s
|
||||
# retries: 5
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
eveai_api:
|
||||
platform: linux/amd64
|
||||
image: josakola/eveai_api:latest
|
||||
ports:
|
||||
- 5003:5003
|
||||
environment:
|
||||
<<: *common-variables
|
||||
COMPONENT_NAME: eveai_api
|
||||
volumes:
|
||||
- eveai_logs:/app/logs
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://localhost:5003/healthz/ready" ]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
eveai_beat:
|
||||
platform: linux/amd64
|
||||
image: josakola/eveai_beat:latest
|
||||
environment:
|
||||
<<: *common-variables
|
||||
COMPONENT_NAME: eveai_beat
|
||||
volumes:
|
||||
- eveai_logs:/app/logs
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
eveai_entitlements:
|
||||
platform: linux/amd64
|
||||
image: josakola/eveai_entitlements:latest
|
||||
environment:
|
||||
<<: *common-variables
|
||||
COMPONENT_NAME: eveai_entitlements
|
||||
volumes:
|
||||
- eveai_logs:/app/logs
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
flower:
|
||||
image: josakola/flower:latest
|
||||
environment:
|
||||
<<: *common-variables
|
||||
ports:
|
||||
- "5555:5555"
|
||||
networks:
|
||||
- eveai-network
|
||||
|
||||
volumes:
|
||||
eveai_logs:
|
||||
# miniAre theo_data:
|
||||
# db-data:
|
||||
# redis-data:
|
||||
# tenant-files:
|
||||
#secrets:
|
||||
# db-password:
|
||||
# file: ./db/password.txt
|
||||
|
||||
|
||||
|
||||
70
docker/eveai_api/Dockerfile
Normal file
70
docker/eveai_api/Dockerfile
Normal file
@@ -0,0 +1,70 @@
|
||||
ARG PYTHON_VERSION=3.12.3
|
||||
FROM python:${PYTHON_VERSION}-slim as base
|
||||
|
||||
# Prevents Python from writing pyc files.
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
|
||||
# Keeps Python from buffering stdout and stderr to avoid situations where
|
||||
# the application crashes without emitting any logs due to buffering.
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
|
||||
# Create directory for patched packages and set permissions
|
||||
RUN mkdir -p /app/patched_packages && \
|
||||
chmod 777 /app/patched_packages
|
||||
|
||||
# Ensure patches are applied to the application.
|
||||
ENV PYTHONPATH=/app/patched_packages:$PYTHONPATH
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Create a non-privileged user that the app will run under.
|
||||
# See https://docs.docker.com/go/dockerfile-user-best-practices/
|
||||
ARG UID=10001
|
||||
RUN adduser \
|
||||
--disabled-password \
|
||||
--gecos "" \
|
||||
--home "/nonexistent" \
|
||||
--shell "/bin/bash" \
|
||||
--no-create-home \
|
||||
--uid "${UID}" \
|
||||
appuser
|
||||
|
||||
# Install necessary packages and build tools
|
||||
RUN apt-get update && apt-get install -y \
|
||||
build-essential \
|
||||
gcc \
|
||||
postgresql-client \
|
||||
curl \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Create logs directory and set permissions
|
||||
RUN mkdir -p /app/logs && chown -R appuser:appuser /app/logs
|
||||
|
||||
# Download dependencies as a separate step to take advantage of Docker's caching.
|
||||
# Leverage a cache mount to /root/.cache/pip to speed up subsequent builds.
|
||||
# Leverage a bind mount to requirements.txt to avoid having to copy them into
|
||||
# into this layer.
|
||||
|
||||
COPY requirements.txt /app/
|
||||
RUN python -m pip install -r /app/requirements.txt
|
||||
|
||||
# Copy the source code into the container.
|
||||
COPY eveai_api /app/eveai_api
|
||||
COPY common /app/common
|
||||
COPY config /app/config
|
||||
COPY scripts /app/scripts
|
||||
COPY patched_packages /app/patched_packages
|
||||
|
||||
# Set permissions for entrypoint script
|
||||
RUN chmod 777 /app/scripts/entrypoint.sh
|
||||
|
||||
# Set ownership of the application directory to the non-privileged user
|
||||
RUN chown -R appuser:appuser /app
|
||||
|
||||
# Expose the port that the application listens on.
|
||||
EXPOSE 5003
|
||||
|
||||
# Set entrypoint and command
|
||||
ENTRYPOINT ["/app/scripts/entrypoint.sh"]
|
||||
CMD ["/app/scripts/start_eveai_api.sh"]
|
||||
@@ -34,6 +34,7 @@ RUN apt-get update && apt-get install -y \
|
||||
build-essential \
|
||||
gcc \
|
||||
postgresql-client \
|
||||
curl \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
|
||||
65
docker/eveai_beat/Dockerfile
Normal file
65
docker/eveai_beat/Dockerfile
Normal file
@@ -0,0 +1,65 @@
|
||||
ARG PYTHON_VERSION=3.12.3
|
||||
FROM python:${PYTHON_VERSION}-slim as base
|
||||
|
||||
# Prevents Python from writing pyc files.
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
|
||||
# Keeps Python from buffering stdout and stderr to avoid situations where
|
||||
# the application crashes without emitting any logs due to buffering.
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
|
||||
# Create directory for patched packages and set permissions
|
||||
RUN mkdir -p /app/patched_packages && \
|
||||
chmod 777 /app/patched_packages
|
||||
|
||||
# Ensure patches are applied to the application.
|
||||
ENV PYTHONPATH=/app/patched_packages:$PYTHONPATH
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Create a non-privileged user that the app will run under.
|
||||
# See https://docs.docker.com/go/dockerfile-user-best-practices/
|
||||
ARG UID=10001
|
||||
RUN adduser \
|
||||
--disabled-password \
|
||||
--gecos "" \
|
||||
--home "/nonexistent" \
|
||||
--shell "/bin/bash" \
|
||||
--no-create-home \
|
||||
--uid "${UID}" \
|
||||
appuser
|
||||
|
||||
# Install necessary packages and build tools
|
||||
#RUN apt-get update && apt-get install -y \
|
||||
# build-essential \
|
||||
# gcc \
|
||||
# && apt-get clean \
|
||||
# && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Create logs directory and set permissions
|
||||
RUN mkdir -p /app/logs && chown -R appuser:appuser /app/logs
|
||||
|
||||
# Install Python dependencies.
|
||||
|
||||
# Download dependencies as a separate step to take advantage of Docker's caching.
|
||||
# Leverage a cache mount to /root/.cache/pip to speed up subsequent builds.
|
||||
# Leverage a bind mount to requirements.txt to avoid having to copy them into
|
||||
# into this layer.
|
||||
|
||||
COPY requirements.txt /app/
|
||||
RUN python -m pip install -r /app/requirements.txt
|
||||
|
||||
# Copy the source code into the container.
|
||||
COPY eveai_beat /app/eveai_beat
|
||||
COPY common /app/common
|
||||
COPY config /app/config
|
||||
COPY scripts /app/scripts
|
||||
COPY patched_packages /app/patched_packages
|
||||
COPY --chown=root:root scripts/entrypoint_no_db.sh /app/scripts/
|
||||
|
||||
# Set ownership of the application directory to the non-privileged user
|
||||
RUN chown -R appuser:appuser /app
|
||||
|
||||
# Set entrypoint and command
|
||||
ENTRYPOINT ["/app/scripts/entrypoint_no_db.sh"]
|
||||
CMD ["/app/scripts/start_eveai_beat.sh"]
|
||||
@@ -34,6 +34,7 @@ RUN apt-get update && apt-get install -y \
|
||||
build-essential \
|
||||
gcc \
|
||||
postgresql-client \
|
||||
curl \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
@@ -45,7 +46,7 @@ RUN mkdir -p /app/logs && chown -R appuser:appuser /app/logs
|
||||
# Leverage a bind mount to requirements.txt to avoid having to copy them into
|
||||
# into this layer.
|
||||
|
||||
COPY ../../requirements.txt /app/
|
||||
COPY requirements.txt /app/
|
||||
RUN python -m pip install -r requirements.txt
|
||||
|
||||
# Copy the source code into the container.
|
||||
|
||||
69
docker/eveai_entitlements/Dockerfile
Normal file
69
docker/eveai_entitlements/Dockerfile
Normal file
@@ -0,0 +1,69 @@
|
||||
ARG PYTHON_VERSION=3.12.3
|
||||
FROM python:${PYTHON_VERSION}-slim as base
|
||||
|
||||
# Prevents Python from writing pyc files.
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
|
||||
# Keeps Python from buffering stdout and stderr to avoid situations where
|
||||
# the application crashes without emitting any logs due to buffering.
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
|
||||
# Create directory for patched packages and set permissions
|
||||
RUN mkdir -p /app/patched_packages && \
|
||||
chmod 777 /app/patched_packages
|
||||
|
||||
# Ensure patches are applied to the application.
|
||||
ENV PYTHONPATH=/app/patched_packages:$PYTHONPATH
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Create a non-privileged user that the app will run under.
|
||||
# See https://docs.docker.com/go/dockerfile-user-best-practices/
|
||||
ARG UID=10001
|
||||
RUN adduser \
|
||||
--disabled-password \
|
||||
--gecos "" \
|
||||
--home "/nonexistent" \
|
||||
--shell "/bin/bash" \
|
||||
--no-create-home \
|
||||
--uid "${UID}" \
|
||||
appuser
|
||||
|
||||
# Install necessary packages and build tools
|
||||
RUN apt-get update && apt-get install -y \
|
||||
build-essential \
|
||||
gcc \
|
||||
postgresql-client \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Create logs directory and set permissions
|
||||
RUN mkdir -p /app/logs && chown -R appuser:appuser /app/logs
|
||||
|
||||
# Install Python dependencies.
|
||||
|
||||
# Download dependencies as a separate step to take advantage of Docker's caching.
|
||||
# Leverage a cache mount to /root/.cache/pip to speed up subsequent builds.
|
||||
# Leverage a bind mount to requirements.txt to avoid having to copy them into
|
||||
# into this layer.
|
||||
|
||||
COPY requirements.txt /app/
|
||||
RUN python -m pip install -r /app/requirements.txt
|
||||
|
||||
# Copy the source code into the container.
|
||||
COPY eveai_entitlements /app/eveai_entitlements
|
||||
COPY common /app/common
|
||||
COPY config /app/config
|
||||
COPY scripts /app/scripts
|
||||
COPY patched_packages /app/patched_packages
|
||||
COPY --chown=root:root scripts/entrypoint.sh /app/scripts/
|
||||
|
||||
# Set permissions for entrypoint script
|
||||
RUN chmod 777 /app/scripts/entrypoint.sh
|
||||
|
||||
# Set ownership of the application directory to the non-privileged user
|
||||
RUN chown -R appuser:appuser /app
|
||||
|
||||
# Set entrypoint and command
|
||||
ENTRYPOINT ["/app/scripts/entrypoint.sh"]
|
||||
CMD ["/app/scripts/start_eveai_entitlements.sh"]
|
||||
34
docker/flower/Dockerfile
Normal file
34
docker/flower/Dockerfile
Normal file
@@ -0,0 +1,34 @@
|
||||
ARG PYTHON_VERSION=3.12.3
|
||||
FROM python:${PYTHON_VERSION}-slim as base
|
||||
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
ARG UID=10001
|
||||
RUN adduser \
|
||||
--disabled-password \
|
||||
--gecos "" \
|
||||
--home "/nonexistent" \
|
||||
--shell "/bin/bash" \
|
||||
--no-create-home \
|
||||
--uid "${UID}" \
|
||||
appuser
|
||||
|
||||
RUN apt-get update && apt-get install -y \
|
||||
build-essential \
|
||||
gcc \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY requirements.txt /app/
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
COPY . /app
|
||||
COPY scripts/start_flower.sh /app/start_flower.sh
|
||||
RUN chmod a+x /app/start_flower.sh
|
||||
|
||||
USER appuser
|
||||
|
||||
CMD ["/app/start_flower.sh"]
|
||||
@@ -1,13 +1,13 @@
|
||||
from flask import Flask, jsonify
|
||||
from flask_jwt_extended import get_jwt_identity
|
||||
from common.extensions import db, api, jwt, minio_client
|
||||
from flask import Flask, jsonify, request
|
||||
from flask_jwt_extended import get_jwt_identity, verify_jwt_in_request
|
||||
from common.extensions import db, api_rest, jwt, minio_client, simple_encryption
|
||||
import os
|
||||
import logging.config
|
||||
|
||||
from common.utils.database import Database
|
||||
from config.logging_config import LOGGING
|
||||
from .api.document_api import AddDocumentResource
|
||||
from .api.auth import TokenResource
|
||||
from .api.document_api import document_ns
|
||||
from .api.auth import auth_ns
|
||||
from config.config import get_config
|
||||
from common.utils.celery_utils import make_celery, init_celery
|
||||
from common.utils.eveai_exceptions import EveAIException
|
||||
@@ -39,34 +39,79 @@ def create_app(config_file=None):
|
||||
# Register Necessary Extensions
|
||||
register_extensions(app)
|
||||
|
||||
# register Namespaces
|
||||
register_namespaces(api_rest)
|
||||
|
||||
# Register Blueprints
|
||||
register_blueprints(app)
|
||||
|
||||
# Error handler for the API
|
||||
@app.errorhandler(EveAIException)
|
||||
def handle_eveai_exception(error):
|
||||
response = jsonify(error.to_dict())
|
||||
response.status_code = error.status_code
|
||||
return response
|
||||
return {'message': str(error)}, error.status_code
|
||||
|
||||
@api.before_request
|
||||
@app.before_request
|
||||
def before_request():
|
||||
# Extract tenant_id from the JWT token
|
||||
tenant_id = get_jwt_identity()
|
||||
app.logger.debug(f'Before request: {request.method} {request.path}')
|
||||
app.logger.debug(f'Request URL: {request.url}')
|
||||
app.logger.debug(f'Request headers: {dict(request.headers)}')
|
||||
|
||||
# Switch to the correct schema
|
||||
Database(tenant_id).switch_schema()
|
||||
# Log request arguments
|
||||
app.logger.debug(f'Request args: {request.args}')
|
||||
|
||||
# Register resources
|
||||
register_api_resources()
|
||||
# Log form data if it's a POST request
|
||||
if request.method == 'POST':
|
||||
app.logger.debug(f'Form data: {request.form}')
|
||||
|
||||
# Log JSON data if the content type is application/json
|
||||
if request.is_json:
|
||||
app.logger.debug(f'JSON data: {request.json}')
|
||||
|
||||
# Log raw data for other content types
|
||||
if request.data:
|
||||
app.logger.debug(f'Raw data: {request.data}')
|
||||
|
||||
# Check if this is a request to the token endpoint
|
||||
if request.path == '/api/v1/auth/token' and request.method == 'POST':
|
||||
app.logger.debug('Token request detected, skipping JWT verification')
|
||||
return
|
||||
|
||||
# Check if this a health check request
|
||||
if request.path.startswith('/_healthz') or request.path.startswith('/healthz'):
|
||||
app.logger.debug('Health check request detected, skipping JWT verification')
|
||||
else:
|
||||
try:
|
||||
verify_jwt_in_request(optional=True)
|
||||
tenant_id = get_jwt_identity()
|
||||
app.logger.debug(f'Tenant ID from JWT: {tenant_id}')
|
||||
|
||||
if tenant_id:
|
||||
Database(tenant_id).switch_schema()
|
||||
app.logger.debug(f'Switched to schema for tenant {tenant_id}')
|
||||
else:
|
||||
app.logger.debug('No tenant ID found in JWT')
|
||||
except Exception as e:
|
||||
app.logger.error(f'Error in before_request: {str(e)}')
|
||||
# Don't raise the exception here, let the request continue
|
||||
# The appropriate error handling will be done in the specific endpoints
|
||||
|
||||
return app
|
||||
|
||||
|
||||
def register_extensions(app):
|
||||
db.init_app(app)
|
||||
api.init_app(app)
|
||||
api_rest.init_app(app, title='EveAI API', version='1.0', description='EveAI API')
|
||||
jwt.init_app(app)
|
||||
minio_client.init_app(app)
|
||||
simple_encryption.init_app(app)
|
||||
|
||||
|
||||
def register_api_resources():
|
||||
api.add_resource(AddDocumentResource, '/api/v1/documents/add_document')
|
||||
api.add_resource(TokenResource, '/api/v1/token')
|
||||
def register_namespaces(app):
|
||||
api_rest.add_namespace(document_ns, path='/api/v1/documents')
|
||||
api_rest.add_namespace(auth_ns, path='/api/v1/auth')
|
||||
|
||||
|
||||
def register_blueprints(app):
|
||||
from .views.healthz_views import healthz_bp
|
||||
app.register_blueprint(healthz_bp)
|
||||
|
||||
|
||||
@@ -1,24 +1,75 @@
|
||||
from flask_restful import Resource, reqparse
|
||||
from datetime import timedelta
|
||||
|
||||
from flask_restx import Namespace, Resource, fields
|
||||
from flask_jwt_extended import create_access_token
|
||||
from common.models.user import Tenant
|
||||
from common.extensions import simple_encryption
|
||||
from flask import current_app
|
||||
from flask import current_app, request
|
||||
|
||||
auth_ns = Namespace('auth', description='Authentication related operations')
|
||||
|
||||
token_model = auth_ns.model('Token', {
|
||||
'tenant_id': fields.Integer(required=True, description='Tenant ID'),
|
||||
'api_key': fields.String(required=True, description='API Key')
|
||||
})
|
||||
|
||||
token_response = auth_ns.model('TokenResponse', {
|
||||
'access_token': fields.String(description='JWT access token'),
|
||||
'expires_in': fields.Integer(description='Token expiration time in seconds')
|
||||
})
|
||||
|
||||
|
||||
class TokenResource(Resource):
|
||||
@auth_ns.route('/token')
|
||||
class Token(Resource):
|
||||
@auth_ns.expect(token_model)
|
||||
@auth_ns.response(200, 'Success', token_response)
|
||||
@auth_ns.response(400, 'Validation Error')
|
||||
@auth_ns.response(401, 'Unauthorized')
|
||||
@auth_ns.response(404, 'Tenant Not Found')
|
||||
def post(self):
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument('tenant_id', type=int, required=True)
|
||||
parser.add_argument('api_key', type=str, required=True)
|
||||
args = parser.parse_args()
|
||||
"""
|
||||
Get JWT token
|
||||
"""
|
||||
current_app.logger.debug(f"Token endpoint called with data: {request.json}")
|
||||
|
||||
try:
|
||||
tenant_id = auth_ns.payload['tenant_id']
|
||||
api_key = auth_ns.payload['api_key']
|
||||
except KeyError as e:
|
||||
current_app.logger.error(f"Missing required field: {e}")
|
||||
return {'message': f"Missing required field: {e}"}, 400
|
||||
|
||||
current_app.logger.debug(f"Querying database for tenant: {tenant_id}")
|
||||
tenant = Tenant.query.get(tenant_id)
|
||||
|
||||
tenant = Tenant.query.get(args['tenant_id'])
|
||||
if not tenant:
|
||||
return {'message': 'Tenant not found'}, 404
|
||||
current_app.logger.error(f"Tenant not found: {tenant_id}")
|
||||
return {'message': "Tenant not found"}, 404
|
||||
|
||||
decrypted_api_key = simple_encryption.decrypt_api_key(tenant.encrypted_api_key)
|
||||
if args['api_key'] != decrypted_api_key:
|
||||
return {'message': 'Invalid API key'}, 401
|
||||
current_app.logger.debug(f"Tenant found: {tenant.id}")
|
||||
|
||||
access_token = create_access_token(identity={'tenant_id': tenant.id})
|
||||
return {'access_token': access_token}, 200
|
||||
try:
|
||||
current_app.logger.debug("Attempting to decrypt API key")
|
||||
decrypted_api_key = simple_encryption.decrypt_api_key(tenant.encrypted_api_key)
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"Error decrypting API key: {e}")
|
||||
return {'message': "Internal server error"}, 500
|
||||
|
||||
if api_key != decrypted_api_key:
|
||||
current_app.logger.error(f"Invalid API key for tenant: {tenant_id}")
|
||||
return {'message': "Invalid API key"}, 401
|
||||
|
||||
# Get the JWT_ACCESS_TOKEN_EXPIRES setting from the app config
|
||||
expires_delta = current_app.config.get('JWT_ACCESS_TOKEN_EXPIRES', timedelta(minutes=15))
|
||||
|
||||
try:
|
||||
current_app.logger.debug(f"Creating access token for tenant: {tenant_id}")
|
||||
access_token = create_access_token(identity=tenant_id, expires_delta=expires_delta)
|
||||
current_app.logger.debug("Access token created successfully")
|
||||
return {
|
||||
'access_token': access_token,
|
||||
'expires_in': expires_delta.total_seconds()
|
||||
}, 200
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"Error creating access token: {e}")
|
||||
return {'message': "Internal server error"}, 500
|
||||
|
||||
@@ -1,31 +1,73 @@
|
||||
from flask_restful import Resource, reqparse
|
||||
from flask import current_app
|
||||
import json
|
||||
from datetime import datetime
|
||||
|
||||
import pytz
|
||||
from flask import current_app, request
|
||||
from flask_restx import Namespace, Resource, fields, reqparse
|
||||
from flask_jwt_extended import jwt_required, get_jwt_identity
|
||||
from werkzeug.datastructures import FileStorage
|
||||
from werkzeug.utils import secure_filename
|
||||
from common.utils.document_utils import (
|
||||
create_document_stack, process_url, start_embedding_task,
|
||||
validate_file_type, EveAIInvalidLanguageException, EveAIDoubleURLException, EveAIUnsupportedFileType,
|
||||
process_multiple_urls, prepare_youtube_document
|
||||
process_multiple_urls, get_documents_list, edit_document, refresh_document, edit_document_version,
|
||||
refresh_document_with_info
|
||||
)
|
||||
from common.utils.eveai_exceptions import EveAIYoutubeError
|
||||
|
||||
|
||||
class AddDocumentResource(Resource):
|
||||
def validate_date(date_str):
|
||||
try:
|
||||
return datetime.fromisoformat(date_str).replace(tzinfo=pytz.UTC)
|
||||
except ValueError:
|
||||
raise ValueError("Invalid date format. Use ISO format (YYYY-MM-DDTHH:MM:SS).")
|
||||
|
||||
|
||||
def validate_json(json_str):
|
||||
try:
|
||||
return json.loads(json_str)
|
||||
except json.JSONDecodeError:
|
||||
raise ValueError("Invalid JSON format for user_metadata.")
|
||||
|
||||
|
||||
document_ns = Namespace('documents', description='Document related operations')
|
||||
|
||||
# Define models for request parsing and response serialization
|
||||
upload_parser = reqparse.RequestParser()
|
||||
upload_parser.add_argument('file', location='files', type=FileStorage, required=True, help='The file to upload')
|
||||
upload_parser.add_argument('name', location='form', type=str, required=False, help='Name of the document')
|
||||
upload_parser.add_argument('language', location='form', type=str, required=True, help='Language of the document')
|
||||
upload_parser.add_argument('user_context', location='form', type=str, required=False,
|
||||
help='User context for the document')
|
||||
upload_parser.add_argument('valid_from', location='form', type=validate_date, required=False,
|
||||
help='Valid from date for the document (ISO format)')
|
||||
upload_parser.add_argument('user_metadata', location='form', type=validate_json, required=False,
|
||||
help='User metadata for the document (JSON format)')
|
||||
|
||||
add_document_response = document_ns.model('AddDocumentResponse', {
|
||||
'message': fields.String(description='Status message'),
|
||||
'document_id': fields.Integer(description='ID of the created document'),
|
||||
'document_version_id': fields.Integer(description='ID of the created document version'),
|
||||
'task_id': fields.String(description='ID of the embedding task')
|
||||
})
|
||||
|
||||
|
||||
@document_ns.route('/add_document')
|
||||
class AddDocument(Resource):
|
||||
@jwt_required()
|
||||
@document_ns.expect(upload_parser)
|
||||
@document_ns.response(201, 'Document added successfully', add_document_response)
|
||||
@document_ns.response(400, 'Validation Error')
|
||||
@document_ns.response(500, 'Internal Server Error')
|
||||
def post(self):
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument('file', type=FileStorage, location='files', required=True)
|
||||
parser.add_argument('name', type=str, required=False)
|
||||
parser.add_argument('language', type=str, required=True)
|
||||
parser.add_argument('user_context', type=str, required=False)
|
||||
parser.add_argument('valid_from', type=str, required=False)
|
||||
args = parser.parse_args()
|
||||
|
||||
"""
|
||||
Add a new document
|
||||
"""
|
||||
tenant_id = get_jwt_identity()
|
||||
current_app.logger.info(f'Adding document for tenant {tenant_id}')
|
||||
|
||||
try:
|
||||
args = upload_parser.parse_args()
|
||||
|
||||
file = args['file']
|
||||
filename = secure_filename(file.filename)
|
||||
extension = filename.rsplit('.', 1)[1].lower()
|
||||
@@ -33,10 +75,11 @@ class AddDocumentResource(Resource):
|
||||
validate_file_type(extension)
|
||||
|
||||
api_input = {
|
||||
'name': args['name'] or filename,
|
||||
'language': args['language'],
|
||||
'user_context': args['user_context'],
|
||||
'valid_from': args['valid_from']
|
||||
'name': args.get('name') or filename,
|
||||
'language': args.get('language'),
|
||||
'user_context': args.get('user_context'),
|
||||
'valid_from': args.get('valid_from'),
|
||||
'user_metadata': args.get('user_metadata'),
|
||||
}
|
||||
|
||||
new_doc, new_doc_vers = create_document_stack(api_input, file, filename, extension, tenant_id)
|
||||
@@ -50,35 +93,57 @@ class AddDocumentResource(Resource):
|
||||
}, 201
|
||||
|
||||
except (EveAIInvalidLanguageException, EveAIUnsupportedFileType) as e:
|
||||
return {'message': str(e)}, 400
|
||||
current_app.logger.error(f'Error adding document: {str(e)}')
|
||||
document_ns.abort(400, str(e))
|
||||
except Exception as e:
|
||||
current_app.logger.error(f'Error adding document: {str(e)}')
|
||||
return {'message': 'Error adding document'}, 500
|
||||
document_ns.abort(500, 'Error adding document')
|
||||
|
||||
|
||||
class AddURLResource(Resource):
|
||||
# Models for AddURL
|
||||
add_url_model = document_ns.model('AddURL', {
|
||||
'url': fields.String(required=True, description='URL of the document to add'),
|
||||
'name': fields.String(required=False, description='Name of the document'),
|
||||
'language': fields.String(required=True, description='Language of the document'),
|
||||
'user_context': fields.String(required=False, description='User context for the document'),
|
||||
'valid_from': fields.String(required=False, description='Valid from date for the document'),
|
||||
'user_metadata': fields.String(required=False, description='User metadata for the document'),
|
||||
'system_metadata': fields.String(required=False, description='System metadata for the document')
|
||||
})
|
||||
|
||||
add_url_response = document_ns.model('AddURLResponse', {
|
||||
'message': fields.String(description='Status message'),
|
||||
'document_id': fields.Integer(description='ID of the created document'),
|
||||
'document_version_id': fields.Integer(description='ID of the created document version'),
|
||||
'task_id': fields.String(description='ID of the embedding task')
|
||||
})
|
||||
|
||||
|
||||
@document_ns.route('/add_url')
|
||||
class AddURL(Resource):
|
||||
@jwt_required()
|
||||
@document_ns.expect(add_url_model)
|
||||
@document_ns.response(201, 'Document added successfully', add_url_response)
|
||||
@document_ns.response(400, 'Validation Error')
|
||||
@document_ns.response(500, 'Internal Server Error')
|
||||
def post(self):
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument('url', type=str, required=True)
|
||||
parser.add_argument('name', type=str, required=False)
|
||||
parser.add_argument('language', type=str, required=True)
|
||||
parser.add_argument('user_context', type=str, required=False)
|
||||
parser.add_argument('valid_from', type=str, required=False)
|
||||
args = parser.parse_args()
|
||||
|
||||
"""
|
||||
Add a new document from URL
|
||||
"""
|
||||
tenant_id = get_jwt_identity()
|
||||
current_app.logger.info(f'Adding document from URL for tenant {tenant_id}')
|
||||
|
||||
try:
|
||||
args = document_ns.payload
|
||||
file_content, filename, extension = process_url(args['url'], tenant_id)
|
||||
|
||||
api_input = {
|
||||
'url': args['url'],
|
||||
'name': args['name'] or filename,
|
||||
'name': args.get('name') or filename,
|
||||
'language': args['language'],
|
||||
'user_context': args['user_context'],
|
||||
'valid_from': args['valid_from']
|
||||
'user_context': args.get('user_context'),
|
||||
'valid_from': args.get('valid_from'),
|
||||
'user_metadata': args.get('user_metadata'),
|
||||
}
|
||||
|
||||
new_doc, new_doc_vers = create_document_stack(api_input, file_content, filename, extension, tenant_id)
|
||||
@@ -92,87 +157,157 @@ class AddURLResource(Resource):
|
||||
}, 201
|
||||
|
||||
except EveAIDoubleURLException:
|
||||
return {'message': f'A document with URL {args["url"]} already exists.'}, 400
|
||||
document_ns.abort(400, f'A document with URL {args["url"]} already exists.')
|
||||
except (EveAIInvalidLanguageException, EveAIUnsupportedFileType) as e:
|
||||
return {'message': str(e)}, 400
|
||||
document_ns.abort(400, str(e))
|
||||
except Exception as e:
|
||||
current_app.logger.error(f'Error adding document from URL: {str(e)}')
|
||||
return {'message': 'Error adding document from URL'}, 500
|
||||
document_ns.abort(500, 'Error adding document from URL')
|
||||
|
||||
|
||||
class AddMultipleURLsResource(Resource):
|
||||
document_list_model = document_ns.model('DocumentList', {
|
||||
'id': fields.Integer(description='Document ID'),
|
||||
'name': fields.String(description='Document name'),
|
||||
'valid_from': fields.DateTime(description='Valid from date'),
|
||||
'valid_to': fields.DateTime(description='Valid to date'),
|
||||
})
|
||||
|
||||
|
||||
@document_ns.route('/list')
|
||||
class DocumentList(Resource):
|
||||
@jwt_required()
|
||||
def post(self):
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument('urls', type=str, action='append', required=True)
|
||||
parser.add_argument('name', type=str, required=False)
|
||||
parser.add_argument('language', type=str, required=True)
|
||||
parser.add_argument('user_context', type=str, required=False)
|
||||
parser.add_argument('valid_from', type=str, required=False)
|
||||
args = parser.parse_args()
|
||||
@document_ns.doc('list_documents')
|
||||
@document_ns.marshal_list_with(document_list_model, envelope='documents')
|
||||
def get(self):
|
||||
"""List all documents"""
|
||||
page = request.args.get('page', 1, type=int)
|
||||
per_page = request.args.get('per_page', 10, type=int)
|
||||
pagination = get_documents_list(page, per_page)
|
||||
return pagination.items, 200
|
||||
|
||||
|
||||
edit_document_model = document_ns.model('EditDocument', {
|
||||
'name': fields.String(required=True, description='New name for the document'),
|
||||
'valid_from': fields.DateTime(required=False, description='New valid from date'),
|
||||
'valid_to': fields.DateTime(required=False, description='New valid to date'),
|
||||
})
|
||||
|
||||
|
||||
@document_ns.route('/<int:document_id>')
|
||||
class DocumentResource(Resource):
|
||||
@jwt_required()
|
||||
@document_ns.doc('edit_document')
|
||||
@document_ns.expect(edit_document_model)
|
||||
@document_ns.response(200, 'Document updated successfully')
|
||||
def put(self, document_id):
|
||||
"""Edit a document"""
|
||||
data = request.json
|
||||
updated_doc, error = edit_document(document_id, data['name'], data.get('valid_from'), data.get('valid_to'))
|
||||
if updated_doc:
|
||||
return {'message': f'Document {updated_doc.id} updated successfully'}, 200
|
||||
else:
|
||||
return {'message': f'Error updating document: {error}'}, 400
|
||||
|
||||
@jwt_required()
|
||||
@document_ns.doc('refresh_document')
|
||||
@document_ns.response(200, 'Document refreshed successfully')
|
||||
def post(self, document_id):
|
||||
"""Refresh a document"""
|
||||
new_version, result = refresh_document(document_id)
|
||||
if new_version:
|
||||
return {'message': f'Document refreshed. New version: {new_version.id}. Task ID: {result}'}, 200
|
||||
else:
|
||||
return {'message': f'Error refreshing document: {result}'}, 400
|
||||
|
||||
|
||||
edit_document_version_model = document_ns.model('EditDocumentVersion', {
|
||||
'user_context': fields.String(required=True, description='New user context for the document version'),
|
||||
})
|
||||
|
||||
|
||||
@document_ns.route('/version/<int:version_id>')
|
||||
class DocumentVersionResource(Resource):
|
||||
@jwt_required()
|
||||
@document_ns.doc('edit_document_version')
|
||||
@document_ns.expect(edit_document_version_model)
|
||||
@document_ns.response(200, 'Document version updated successfully')
|
||||
def put(self, version_id):
|
||||
"""Edit a document version"""
|
||||
data = request.json
|
||||
updated_version, error = edit_document_version(version_id, data['user_context'])
|
||||
if updated_version:
|
||||
return {'message': f'Document Version {updated_version.id} updated successfully'}, 200
|
||||
else:
|
||||
return {'message': f'Error updating document version: {error}'}, 400
|
||||
|
||||
|
||||
# Define the model for the request body of refresh_with_info
|
||||
refresh_document_model = document_ns.model('RefreshDocument', {
|
||||
'name': fields.String(required=False, description='New name for the document'),
|
||||
'language': fields.String(required=False, description='Language of the document'),
|
||||
'user_context': fields.String(required=False, description='User context for the document'),
|
||||
'user_metadata': fields.Raw(required=False, description='User metadata for the document')
|
||||
})
|
||||
|
||||
|
||||
@document_ns.route('/<int:document_id>/refresh')
|
||||
class RefreshDocument(Resource):
|
||||
@jwt_required()
|
||||
@document_ns.response(200, 'Document refreshed successfully')
|
||||
@document_ns.response(404, 'Document not found')
|
||||
def post(self, document_id):
|
||||
"""
|
||||
Refresh a document without additional information
|
||||
"""
|
||||
tenant_id = get_jwt_identity()
|
||||
current_app.logger.info(f'Adding multiple documents from URLs for tenant {tenant_id}')
|
||||
current_app.logger.info(f'Refreshing document {document_id} for tenant {tenant_id}')
|
||||
|
||||
try:
|
||||
api_input = {
|
||||
'name': args['name'],
|
||||
'language': args['language'],
|
||||
'user_context': args['user_context'],
|
||||
'valid_from': args['valid_from']
|
||||
}
|
||||
new_version, result = refresh_document(document_id)
|
||||
|
||||
results = process_multiple_urls(args['urls'], tenant_id, api_input)
|
||||
|
||||
return {
|
||||
'message': 'Processing of multiple URLs completed',
|
||||
'results': results
|
||||
}, 201
|
||||
if new_version:
|
||||
return {
|
||||
'message': f'Document refreshed successfully. New version: {new_version.id}. Task ID: {result}',
|
||||
'document_id': document_id,
|
||||
'document_version_id': new_version.id,
|
||||
'task_id': result
|
||||
}, 200
|
||||
else:
|
||||
return {'message': f'Error refreshing document: {result}'}, 400
|
||||
|
||||
except Exception as e:
|
||||
current_app.logger.error(f'Error adding documents from URLs: {str(e)}')
|
||||
return {'message': 'Error adding documents from URLs'}, 500
|
||||
current_app.logger.error(f'Error refreshing document: {str(e)}')
|
||||
return {'message': 'Internal server error'}, 500
|
||||
|
||||
|
||||
class AddYoutubeResource(Resource):
|
||||
@document_ns.route('/<int:document_id>/refresh_with_info')
|
||||
class RefreshDocumentWithInfo(Resource):
|
||||
@jwt_required()
|
||||
def post(self):
|
||||
parser = reqparse.RequestParser()
|
||||
parser.add_argument('url', type=str, required=True)
|
||||
parser.add_argument('name', type=str, required=False)
|
||||
parser.add_argument('language', type=str, required=True)
|
||||
parser.add_argument('user_context', type=str, required=False)
|
||||
parser.add_argument('valid_from', type=str, required=False)
|
||||
args = parser.parse_args()
|
||||
|
||||
@document_ns.expect(refresh_document_model)
|
||||
@document_ns.response(200, 'Document refreshed successfully')
|
||||
@document_ns.response(400, 'Validation Error')
|
||||
@document_ns.response(404, 'Document not found')
|
||||
def post(self, document_id):
|
||||
"""
|
||||
Refresh a document with new information
|
||||
"""
|
||||
tenant_id = get_jwt_identity()
|
||||
current_app.logger.info(f'Adding YouTube document for tenant {tenant_id}')
|
||||
current_app.logger.info(f'Refreshing document {document_id} with info for tenant {tenant_id}')
|
||||
|
||||
try:
|
||||
api_input = {
|
||||
'name': args['name'],
|
||||
'language': args['language'],
|
||||
'user_context': args['user_context'],
|
||||
'valid_from': args['valid_from']
|
||||
}
|
||||
api_input = request.json
|
||||
new_version, result = refresh_document_with_info(document_id, api_input)
|
||||
|
||||
new_doc, new_doc_vers = prepare_youtube_document(args['url'], tenant_id, api_input)
|
||||
task_id = start_embedding_task(tenant_id, new_doc_vers.id)
|
||||
if new_version:
|
||||
return {
|
||||
'message': f'Document refreshed successfully with new info. New version: {new_version.id}. Task ID: {result}',
|
||||
'document_id': document_id,
|
||||
'document_version_id': new_version.id,
|
||||
'task_id': result
|
||||
}, 200
|
||||
else:
|
||||
return {'message': f'Error refreshing document with info: {result}'}, 400
|
||||
|
||||
return {
|
||||
'message': f'Processing on YouTube document {new_doc.name}, version {new_doc_vers.id} started. Task ID: {task_id}.',
|
||||
'document_id': new_doc.id,
|
||||
'document_version_id': new_doc_vers.id,
|
||||
'task_id': task_id
|
||||
}, 201
|
||||
|
||||
except EveAIYoutubeError as e:
|
||||
return {'message': str(e)}, 400
|
||||
except (EveAIInvalidLanguageException, EveAIUnsupportedFileType) as e:
|
||||
return {'message': str(e)}, 400
|
||||
except Exception as e:
|
||||
current_app.logger.error(f'Error adding YouTube document: {str(e)}')
|
||||
return {'message': 'Error adding YouTube document'}, 500
|
||||
|
||||
|
||||
# You can add more API resources here as needed
|
||||
current_app.logger.error(f'Error refreshing document with info: {str(e)}')
|
||||
return {'message': 'Internal server error'}, 500
|
||||
|
||||
82
eveai_api/views/healthz_views.py
Normal file
82
eveai_api/views/healthz_views.py
Normal file
@@ -0,0 +1,82 @@
|
||||
from flask import Blueprint, current_app, request
|
||||
from flask_healthz import HealthError
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
from celery.exceptions import TimeoutError as CeleryTimeoutError
|
||||
from prometheus_client import Counter, Histogram, generate_latest, CONTENT_TYPE_LATEST
|
||||
from common.extensions import db, metrics, minio_client
|
||||
from common.utils.celery_utils import current_celery
|
||||
|
||||
healthz_bp = Blueprint('healthz', __name__, url_prefix='/_healthz')
|
||||
|
||||
# Define Prometheus metrics
|
||||
api_request_counter = Counter('api_request_count', 'API Request Count', ['method', 'endpoint'])
|
||||
api_request_latency = Histogram('api_request_latency_seconds', 'API Request latency')
|
||||
|
||||
|
||||
def liveness():
|
||||
try:
|
||||
# Basic check to see if the app is running
|
||||
return True
|
||||
except Exception:
|
||||
raise HealthError("Liveness check failed")
|
||||
|
||||
|
||||
def readiness():
|
||||
checks = {
|
||||
"database": check_database(),
|
||||
# "celery": check_celery(),
|
||||
"minio": check_minio(),
|
||||
# Add more checks as needed
|
||||
}
|
||||
|
||||
if not all(checks.values()):
|
||||
raise HealthError("Readiness check failed")
|
||||
|
||||
|
||||
def check_database():
|
||||
try:
|
||||
# Perform a simple database query
|
||||
db.session.execute("SELECT 1")
|
||||
return True
|
||||
except SQLAlchemyError:
|
||||
current_app.logger.error("Database check failed", exc_info=True)
|
||||
return False
|
||||
|
||||
|
||||
def check_celery():
|
||||
try:
|
||||
# Send a simple task to Celery
|
||||
result = current_celery.send_task('ping', queue='eveai_workers.ping')
|
||||
response = result.get(timeout=10) # Wait for up to 10 seconds for a response
|
||||
return response == 'pong'
|
||||
except CeleryTimeoutError:
|
||||
current_app.logger.error("Celery check timed out", exc_info=True)
|
||||
return False
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"Celery check failed: {str(e)}", exc_info=True)
|
||||
return False
|
||||
|
||||
|
||||
def check_minio():
|
||||
try:
|
||||
# List buckets to check if MinIO is accessible
|
||||
minio_client.list_buckets()
|
||||
return True
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"MinIO check failed: {str(e)}", exc_info=True)
|
||||
return False
|
||||
|
||||
|
||||
@healthz_bp.route('/metrics')
|
||||
@metrics.do_not_track()
|
||||
def prometheus_metrics():
|
||||
return generate_latest(), 200, {'Content-Type': CONTENT_TYPE_LATEST}
|
||||
|
||||
|
||||
def init_healtz(app):
|
||||
app.config.update(
|
||||
HEALTHZ={
|
||||
"live": "healthz_views.liveness",
|
||||
"ready": "healthz_views.readiness",
|
||||
}
|
||||
)
|
||||
@@ -7,9 +7,11 @@ from werkzeug.middleware.proxy_fix import ProxyFix
|
||||
import logging.config
|
||||
|
||||
from common.extensions import (db, migrate, bootstrap, security, mail, login_manager, cors, csrf, session,
|
||||
minio_client, simple_encryption)
|
||||
minio_client, simple_encryption, metrics)
|
||||
from common.models.user import User, Role, Tenant, TenantDomain
|
||||
import common.models.interaction
|
||||
import common.models.entitlements
|
||||
import common.models.document
|
||||
from common.utils.nginx_utils import prefixed_url_for
|
||||
from config.logging_config import LOGGING
|
||||
from common.utils.security import set_tenant_session_data
|
||||
@@ -114,10 +116,10 @@ def register_extensions(app):
|
||||
csrf.init_app(app)
|
||||
login_manager.init_app(app)
|
||||
cors.init_app(app)
|
||||
# kms_client.init_app(app)
|
||||
simple_encryption.init_app(app)
|
||||
session.init_app(app)
|
||||
minio_client.init_app(app)
|
||||
metrics.init_app(app)
|
||||
|
||||
|
||||
# Register Blueprints
|
||||
@@ -132,3 +134,11 @@ def register_blueprints(app):
|
||||
app.register_blueprint(security_bp)
|
||||
from .views.interaction_views import interaction_bp
|
||||
app.register_blueprint(interaction_bp)
|
||||
from .views.entitlements_views import entitlements_bp
|
||||
app.register_blueprint(entitlements_bp)
|
||||
from .views.administration_views import administration_bp
|
||||
app.register_blueprint(administration_bp)
|
||||
from .views.healthz_views import healthz_bp, init_healtz
|
||||
app.register_blueprint(healthz_bp)
|
||||
init_healtz(app)
|
||||
|
||||
|
||||
File diff suppressed because one or more lines are too long
22
eveai_app/templates/administration/trigger_actions.html
Normal file
22
eveai_app/templates/administration/trigger_actions.html
Normal file
@@ -0,0 +1,22 @@
|
||||
{% extends 'base.html' %}
|
||||
{% from "macros.html" import render_selectable_table, render_pagination, render_field %}
|
||||
{% block title %}Trigger Actions{% endblock %}
|
||||
{% block content_title %}Trigger Actions{% endblock %}
|
||||
{% block content_description %}Manually trigger batch actions{% endblock %}
|
||||
{% block content %}
|
||||
|
||||
<!-- Trigger action Form -->
|
||||
<form method="POST" action="{{ url_for('administration_bp.handle_trigger_action') }}">
|
||||
<div class="form-group mt-3">
|
||||
<button type="submit" name="action" value="update_usages" class="btn btn-secondary">Update Usages</button>
|
||||
</div>
|
||||
</form>
|
||||
|
||||
{% endblock %}
|
||||
|
||||
{% block content_footer %}
|
||||
{% endblock %}
|
||||
|
||||
{% block scripts %}
|
||||
{% endblock %}
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
{% extends 'base.html' %}
|
||||
{% from "macros.html" import render_field %}
|
||||
|
||||
{% block title %}Add Youtube Document{% endblock %}
|
||||
|
||||
{% block content_title %}Add Youtube Document{% endblock %}
|
||||
{% block content_description %}Add a youtube url and the corresponding document to EveAI. In some cases, url's cannot be loaded directly. Download the html and add it as a document in that case.{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<form method="post">
|
||||
{{ form.hidden_tag() }}
|
||||
{% set disabled_fields = [] %}
|
||||
{% set exclude_fields = [] %}
|
||||
{% for field in form %}
|
||||
{{ render_field(field, disabled_fields, exclude_fields) }}
|
||||
{% endfor %}
|
||||
<button type="submit" class="btn btn-primary">Add Youtube Document</button>
|
||||
</form>
|
||||
{% endblock %}
|
||||
|
||||
|
||||
{% block content_footer %}
|
||||
|
||||
{% endblock %}
|
||||
@@ -10,7 +10,7 @@
|
||||
{% block content %}
|
||||
<div class="container">
|
||||
<form method="POST" action="{{ url_for('document_bp.handle_document_version_selection') }}">
|
||||
{{ render_selectable_table(headers=["ID", "URL", "File Loc.", "File Name", "File Type", "Process.", "Proces. Start", "Proces. Finish", "Proces. Error"], rows=rows, selectable=True, id="versionsTable") }}
|
||||
{{ render_selectable_table(headers=["ID", "URL", "Object Name", "File Type", "Process.", "Proces. Start", "Proces. Finish", "Proces. Error"], rows=rows, selectable=True, id="versionsTable") }}
|
||||
<div class="form-group mt-3">
|
||||
<button type="submit" name="action" value="edit_document_version" class="btn btn-primary">Edit Document Version</button>
|
||||
<button type="submit" name="action" value="process_document_version" class="btn btn-danger">Process Document Version</button>
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
{% block content %}
|
||||
<form method="post">
|
||||
{{ form.hidden_tag() }}
|
||||
{% set disabled_fields = ['language', 'system_context'] %}
|
||||
{% set disabled_fields = ['language', 'system_context', 'system_metadata'] %}
|
||||
{% set exclude_fields = [] %}
|
||||
{% for field in form %}
|
||||
{{ render_field(field, disabled_fields, exclude_fields) }}
|
||||
|
||||
71
eveai_app/templates/entitlements/edit_license.html
Normal file
71
eveai_app/templates/entitlements/edit_license.html
Normal file
@@ -0,0 +1,71 @@
|
||||
{% extends 'base.html' %}
|
||||
{% from "macros.html" import render_field, render_included_field %}
|
||||
|
||||
{% block title %}Edit License for Current Tenant{% endblock %}
|
||||
|
||||
{% block content_title %}Edit License for Current Tenant{% endblock %}
|
||||
{% block content_description %}Edit a License based on the selected License Tier for the current Tenant{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<form method="post">
|
||||
{{ form.hidden_tag() }}
|
||||
{% set main_fields = ['start_date', 'end_date', 'currency', 'yearly_payment', 'basic_fee'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=['currency'], include_fields=main_fields) }}
|
||||
{% endfor %}
|
||||
<!-- Nav Tabs -->
|
||||
<div class="row mt-5">
|
||||
<div class="col-lg-12">
|
||||
<div class="nav-wrapper position-relative end-0">
|
||||
<ul class="nav nav-pills nav-fill p-1" role="tablist">
|
||||
<li class="nav-item" role="presentation">
|
||||
<a class="nav-link mb-0 px-0 py-1 active" data-toggle="tab" href="#storage-tab" role="tab" aria-controls="model-info" aria-selected="true">
|
||||
Storage
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link mb-0 px-0 py-1" data-toggle="tab" href="#embedding-tab" role="tab" aria-controls="license-info" aria-selected="false">
|
||||
Embedding
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link mb-0 px-0 py-1" data-toggle="tab" href="#interaction-tab" role="tab" aria-controls="chunking" aria-selected="false">
|
||||
Interaction
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="tab-content tab-space">
|
||||
<!-- Storage Tab -->
|
||||
<div class="tab-pane fade show active" id="storage-tab" role="tabpanel">
|
||||
{% set storage_fields = ['max_storage_tokens', 'additional_storage_token_price', 'additional_storage_bucket'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=[], include_fields=storage_fields) }}
|
||||
{% endfor %}
|
||||
</div>
|
||||
<!-- Embedding Tab -->
|
||||
<div class="tab-pane fade" id="embedding-tab" role="tabpanel">
|
||||
{% set embedding_fields = ['included_embedding_tokens', 'additional_embedding_token_price', 'additional_embedding_bucket'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=[], include_fields=embedding_fields) }}
|
||||
{% endfor %}
|
||||
</div>
|
||||
<!-- Interaction Tab -->
|
||||
<div class="tab-pane fade" id="interaction-tab" role="tabpanel">
|
||||
{% set interaction_fields = ['included_interaction_tokens', 'additional_interaction_token_price', 'additional_interaction_bucket'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=[], include_fields=interaction_fields) }}
|
||||
{% endfor %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<button type="submit" class="btn btn-primary">Save License</button>
|
||||
</form>
|
||||
{% endblock %}
|
||||
|
||||
|
||||
{% block content_footer %}
|
||||
|
||||
{% endblock %}
|
||||
71
eveai_app/templates/entitlements/license.html
Normal file
71
eveai_app/templates/entitlements/license.html
Normal file
@@ -0,0 +1,71 @@
|
||||
{% extends 'base.html' %}
|
||||
{% from "macros.html" import render_field, render_included_field %}
|
||||
|
||||
{% block title %}Create or Edit License for Current Tenant{% endblock %}
|
||||
|
||||
{% block content_title %}Create or Edit License for Current Tenant{% endblock %}
|
||||
{% block content_description %}Create or Edit a new License based on the selected License Tier for the current Tenant{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<form method="post">
|
||||
{{ form.hidden_tag() }}
|
||||
{% set main_fields = ['start_date', 'end_date', 'currency', 'yearly_payment', 'basic_fee'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=ext_disabled_fields + ['currency'], include_fields=main_fields) }}
|
||||
{% endfor %}
|
||||
<!-- Nav Tabs -->
|
||||
<div class="row mt-5">
|
||||
<div class="col-lg-12">
|
||||
<div class="nav-wrapper position-relative end-0">
|
||||
<ul class="nav nav-pills nav-fill p-1" role="tablist">
|
||||
<li class="nav-item" role="presentation">
|
||||
<a class="nav-link mb-0 px-0 py-1 active" data-toggle="tab" href="#storage-tab" role="tab" aria-controls="model-info" aria-selected="true">
|
||||
Storage
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link mb-0 px-0 py-1" data-toggle="tab" href="#embedding-tab" role="tab" aria-controls="license-info" aria-selected="false">
|
||||
Embedding
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link mb-0 px-0 py-1" data-toggle="tab" href="#interaction-tab" role="tab" aria-controls="chunking" aria-selected="false">
|
||||
Interaction
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="tab-content tab-space">
|
||||
<!-- Storage Tab -->
|
||||
<div class="tab-pane fade show active" id="storage-tab" role="tabpanel">
|
||||
{% set storage_fields = ['max_storage_mb', 'additional_storage_price', 'additional_storage_bucket'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=ext_disabled_fields, include_fields=storage_fields) }}
|
||||
{% endfor %}
|
||||
</div>
|
||||
<!-- Embedding Tab -->
|
||||
<div class="tab-pane fade" id="embedding-tab" role="tabpanel">
|
||||
{% set embedding_fields = ['included_embedding_mb', 'additional_embedding_price', 'additional_embedding_bucket', 'overage_embedding'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=ext_disabled_fields, include_fields=embedding_fields) }}
|
||||
{% endfor %}
|
||||
</div>
|
||||
<!-- Interaction Tab -->
|
||||
<div class="tab-pane fade" id="interaction-tab" role="tabpanel">
|
||||
{% set interaction_fields = ['included_interaction_tokens', 'additional_interaction_token_price', 'additional_interaction_bucket', 'overage_interaction'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=ext_disabled_fields, include_fields=interaction_fields) }}
|
||||
{% endfor %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<button type="submit" class="btn btn-primary">Save License</button>
|
||||
</form>
|
||||
{% endblock %}
|
||||
|
||||
|
||||
{% block content_footer %}
|
||||
|
||||
{% endblock %}
|
||||
71
eveai_app/templates/entitlements/license_tier.html
Normal file
71
eveai_app/templates/entitlements/license_tier.html
Normal file
@@ -0,0 +1,71 @@
|
||||
{% extends 'base.html' %}
|
||||
{% from "macros.html" import render_field, render_included_field %}
|
||||
|
||||
{% block title %}Register or Edit License Tier{% endblock %}
|
||||
|
||||
{% block content_title %}Register or Edit License Tier{% endblock %}
|
||||
{% block content_description %}Register or Edit License Tier{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<form method="post">
|
||||
{{ form.hidden_tag() }}
|
||||
{% set main_fields = ['name', 'version', 'start_date', 'end_date', 'basic_fee_d', 'basic_fee_e'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=[], include_fields=main_fields) }}
|
||||
{% endfor %}
|
||||
<!-- Nav Tabs -->
|
||||
<div class="row mt-5">
|
||||
<div class="col-lg-12">
|
||||
<div class="nav-wrapper position-relative end-0">
|
||||
<ul class="nav nav-pills nav-fill p-1" role="tablist">
|
||||
<li class="nav-item" role="presentation">
|
||||
<a class="nav-link mb-0 px-0 py-1 active" data-toggle="tab" href="#storage-tab" role="tab" aria-controls="model-info" aria-selected="true">
|
||||
Storage
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link mb-0 px-0 py-1" data-toggle="tab" href="#embedding-tab" role="tab" aria-controls="license-info" aria-selected="false">
|
||||
Embedding
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link mb-0 px-0 py-1" data-toggle="tab" href="#interaction-tab" role="tab" aria-controls="chunking" aria-selected="false">
|
||||
Interaction
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="tab-content tab-space">
|
||||
<!-- Storage Tab -->
|
||||
<div class="tab-pane fade show active" id="storage-tab" role="tabpanel">
|
||||
{% set storage_fields = ['max_storage_mb', 'additional_storage_price_d', 'additional_storage_price_e', 'additional_storage_bucket'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=[], include_fields=storage_fields) }}
|
||||
{% endfor %}
|
||||
</div>
|
||||
<!-- Embedding Tab -->
|
||||
<div class="tab-pane fade" id="embedding-tab" role="tabpanel">
|
||||
{% set embedding_fields = ['included_embedding_mb', 'additional_embedding_price_d', 'additional_embedding_price_e', 'additional_embedding_bucket', 'standard_overage_embedding'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=[], include_fields=embedding_fields) }}
|
||||
{% endfor %}
|
||||
</div>
|
||||
<!-- Interaction Tab -->
|
||||
<div class="tab-pane fade" id="interaction-tab" role="tabpanel">
|
||||
{% set interaction_fields = ['included_interaction_tokens', 'additional_interaction_token_price_d', 'additional_interaction_token_price_e', 'additional_interaction_bucket', 'standard_overage_interaction'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=[], include_fields=interaction_fields) }}
|
||||
{% endfor %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<button type="submit" class="btn btn-primary">Save License Tier</button>
|
||||
</form>
|
||||
{% endblock %}
|
||||
|
||||
|
||||
{% block content_footer %}
|
||||
|
||||
{% endblock %}
|
||||
24
eveai_app/templates/entitlements/view_license_tiers.html
Normal file
24
eveai_app/templates/entitlements/view_license_tiers.html
Normal file
@@ -0,0 +1,24 @@
|
||||
{% extends 'base.html' %}
|
||||
{% from "macros.html" import render_selectable_table, render_pagination, render_field %}
|
||||
{% block title %}License Tier Selection{% endblock %}
|
||||
{% block content_title %}Select a License Tier{% endblock %}
|
||||
{% block content_description %}Select a License Tier to continue{% endblock %}
|
||||
{% block content %}
|
||||
|
||||
<!-- License Tier Selection Form -->
|
||||
<form method="POST" action="{{ url_for('entitlements_bp.handle_license_tier_selection') }}">
|
||||
{{ render_selectable_table(headers=["ID", "Name", "Version", "Start Date", "End Date"], rows=rows, selectable=True, id="licenseTierTable") }}
|
||||
<div class="form-group mt-3">
|
||||
<button type="submit" name="action" value="edit_license_tier" class="btn btn-primary">Edit License Tier</button>
|
||||
<button type="submit" name="action" value="create_license_for_tenant" class="btn btn-secondary">Create License for Current Tenant</button>
|
||||
</div>
|
||||
</form>
|
||||
|
||||
{% endblock %}
|
||||
|
||||
{% block content_footer %}
|
||||
{{ render_pagination(pagination, 'user_bp.select_tenant') }}
|
||||
{% endblock %}
|
||||
|
||||
|
||||
|
||||
28
eveai_app/templates/entitlements/view_usages.html
Normal file
28
eveai_app/templates/entitlements/view_usages.html
Normal file
@@ -0,0 +1,28 @@
|
||||
{% extends 'base.html' %}
|
||||
{% from "macros.html" import render_selectable_table, render_pagination %}
|
||||
|
||||
{% block title %}View License Usage{% endblock %}
|
||||
|
||||
{% block content_title %}View License Usage{% endblock %}
|
||||
{% block content_description %}View License Usage{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<form action="{{ url_for('user_bp.handle_user_action') }}" method="POST">
|
||||
{{ render_selectable_table(headers=["Usage ID", "Start Date", "End Date", "Storage (MiB)", "Embedding (MiB)", "Interaction (tokens)"], rows=rows, selectable=False, id="usagesTable") }}
|
||||
<!-- <div class="form-group mt-3">-->
|
||||
<!-- <button type="submit" name="action" value="edit_user" class="btn btn-primary">Edit Selected User</button>-->
|
||||
<!-- <button type="submit" name="action" value="resend_confirmation_email" class="btn btn-secondary">Resend Confirmation Email</button>-->
|
||||
<!-- <button type="submit" name="action" value="send_password_reset_email" class="btn btn-secondary">Send Password Reset Email</button>-->
|
||||
<!-- <button type="submit" name="action" value="reset_uniquifier" class="btn btn-secondary">Reset Uniquifier</button>-->
|
||||
<!-- <!– Additional buttons can be added here for other actions –>-->
|
||||
<!-- </div>-->
|
||||
</form>
|
||||
{% endblock %}
|
||||
|
||||
{% block content_footer %}
|
||||
{{ render_pagination(pagination, 'user_bp.select_tenant') }}
|
||||
{% endblock %}
|
||||
|
||||
{% block scripts %}
|
||||
|
||||
{% endblock %}
|
||||
@@ -1,16 +1,16 @@
|
||||
{% macro render_field(field, disabled_fields=[], exclude_fields=[]) %}
|
||||
{% macro render_field(field, disabled_fields=[], exclude_fields=[], class='') %}
|
||||
{% set disabled = field.name in disabled_fields %}
|
||||
{% set exclude_fields = exclude_fields + ['csrf_token', 'submit'] %}
|
||||
{% if field.name not in exclude_fields %}
|
||||
{% if field.type == 'BooleanField' %}
|
||||
<div class="form-check">
|
||||
{{ field(class="form-check-input", type="checkbox", id="flexSwitchCheckDefault") }}
|
||||
{{ field(class="form-check-input " + class, type="checkbox", id="flexSwitchCheckDefault") }}
|
||||
{{ field.label(class="form-check-label", for="flexSwitchCheckDefault", disabled=disabled) }}
|
||||
</div>
|
||||
{% else %}
|
||||
<div class="form-group">
|
||||
{{ field.label(class="form-label") }}
|
||||
{{ field(class="form-control", disabled=disabled) }}
|
||||
{{ field(class="form-control " + class, disabled=disabled) }}
|
||||
{% if field.errors %}
|
||||
<div class="invalid-feedback">
|
||||
{% for error in field.errors %}
|
||||
|
||||
@@ -94,6 +94,14 @@
|
||||
{'name': 'Chat Sessions', 'url': '/interaction/chat_sessions', 'roles': ['Super User', 'Tenant Admin']},
|
||||
]) }}
|
||||
{% endif %}
|
||||
{% if current_user.is_authenticated %}
|
||||
{{ dropdown('Administration', 'settings', [
|
||||
{'name': 'License Tier Registration', 'url': '/entitlements/license_tier', 'roles': ['Super User']},
|
||||
{'name': 'All License Tiers', 'url': '/entitlements/view_license_tiers', 'roles': ['Super User']},
|
||||
{'name': 'Trigger Actions', 'url': '/administration/trigger_actions', 'roles': ['Super User']},
|
||||
{'name': 'Usage', 'url': '/entitlements/view_usages', 'roles': ['Super User', 'Tenant Admin']},
|
||||
]) }}
|
||||
{% endif %}
|
||||
{% if current_user.is_authenticated %}
|
||||
{{ dropdown(current_user.user_name, 'person', [
|
||||
{'name': 'Session Defaults', 'url': '/session_defaults', 'roles': ['Super User', 'Tenant Admin']},
|
||||
|
||||
@@ -13,3 +13,5 @@
|
||||
<script src="{{url_for('static', filename='assets/js/plugins/anime.min.js')}}"></script>
|
||||
<script src="{{url_for('static', filename='assets/js/material-kit-pro.min.js')}}?v=3.0.4 type="text/javascript"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/bootstrap/5.3.3/js/bootstrap.bundle.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/select2/4.0.13/js/select2.min.js"></script>
|
||||
|
||||
|
||||
@@ -1,22 +1,52 @@
|
||||
{% extends 'base.html' %}
|
||||
{% from "macros.html" import render_selectable_table, render_pagination %}
|
||||
|
||||
{% from "macros.html" import render_selectable_table, render_pagination, render_field %}
|
||||
{% block title %}Tenant Selection{% endblock %}
|
||||
|
||||
{% block content_title %}Select a Tenant{% endblock %}
|
||||
{% block content_description %}Select the active tenant for the current session{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
|
||||
<!-- Filter Form -->
|
||||
<form method="POST" action="{{ url_for('user_bp.select_tenant') }}" class="mb-4">
|
||||
{{ filter_form.hidden_tag() }}
|
||||
<div class="row">
|
||||
<div class="col-md-4">
|
||||
{{ render_field(filter_form.types, class="select2") }}
|
||||
</div>
|
||||
<div class="col-md-4">
|
||||
{{ render_field(filter_form.search) }}
|
||||
</div>
|
||||
<div class="col-md-4">
|
||||
{{ filter_form.submit(class="btn btn-primary") }}
|
||||
</div>
|
||||
</div>
|
||||
</form>
|
||||
|
||||
<!-- Tenant Selection Form -->
|
||||
<form method="POST" action="{{ url_for('user_bp.handle_tenant_selection') }}">
|
||||
{{ render_selectable_table(headers=["Tenant ID", "Tenant Name", "Website"], rows=rows, selectable=True, id="tenantsTable") }}
|
||||
{{ render_selectable_table(headers=["Tenant ID", "Tenant Name", "Website", "Type"], rows=rows, selectable=True, id="tenantsTable") }}
|
||||
<div class="form-group mt-3">
|
||||
<button type="submit" name="action" value="select_tenant" class="btn btn-primary">Set Session Tenant</button>
|
||||
<button type="submit" name="action" value="edit_tenant" class="btn btn-secondary">Edit Tenant</button>
|
||||
</div>
|
||||
</form>
|
||||
|
||||
{% endblock %}
|
||||
|
||||
{% block content_footer %}
|
||||
{{ render_pagination(pagination, 'user_bp.select_tenant') }}
|
||||
{{ render_pagination(pagination, 'user_bp.select_tenant') }}
|
||||
{% endblock %}
|
||||
|
||||
{% block scripts %}
|
||||
<script>
|
||||
$(document).ready(function() {
|
||||
$('.select2').select2({
|
||||
placeholder: "Select tenant types",
|
||||
allowClear: true,
|
||||
minimumResultsForSearch: Infinity, // Hides the search box
|
||||
dropdownCssClass: 'select2-dropdown-hidden', // Custom class for dropdown
|
||||
containerCssClass: 'select2-container-hidden' // Custom class for container
|
||||
});
|
||||
});
|
||||
</script>
|
||||
{% endblock %}
|
||||
|
||||
|
||||
@@ -1,21 +1,219 @@
|
||||
{% extends 'base.html' %}
|
||||
{% from "macros.html" import render_field %}
|
||||
{% from "macros.html" import render_field, render_included_field %}
|
||||
|
||||
{% block title %}Tenant Registration{% endblock %}
|
||||
{% block title %}Create or Edit Tenant{% endblock %}
|
||||
|
||||
{% block content_title %}Register Tenant{% endblock %}
|
||||
{% block content_description %}Add a new tenant to EveAI{% endblock %}
|
||||
{% block content_title %}Create or Edit Tenant{% endblock %}
|
||||
{% block content_description %}Create or Edit Tenant{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<form method="post">
|
||||
{{ form.hidden_tag() }}
|
||||
{% set disabled_fields = [] %}
|
||||
{% set exclude_fields = [] %}
|
||||
<!-- Main Tenant Information -->
|
||||
{% set main_fields = ['name', 'website', 'default_language', 'allowed_languages', 'rag_context', 'type'] %}
|
||||
{% for field in form %}
|
||||
{{ render_field(field, disabled_fields, exclude_fields) }}
|
||||
{{ render_included_field(field, disabled_fields=[], include_fields=main_fields) }}
|
||||
{% endfor %}
|
||||
<button type="submit" class="btn btn-primary">Register Tenant</button>
|
||||
|
||||
<!-- Nav Tabs -->
|
||||
<div class="row mt-5">
|
||||
<div class="col-lg-12">
|
||||
<div class="nav-wrapper position-relative end-0">
|
||||
<ul class="nav nav-pills nav-fill p-1" role="tablist">
|
||||
<li class="nav-item" role="presentation">
|
||||
<a class="nav-link mb-0 px-0 py-1 active" data-toggle="tab" href="#model-info-tab" role="tab" aria-controls="model-info" aria-selected="true">
|
||||
Model Information
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link mb-0 px-0 py-1" data-toggle="tab" href="#license-info-tab" role="tab" aria-controls="license-info" aria-selected="false">
|
||||
License Information
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link mb-0 px-0 py-1" data-toggle="tab" href="#chunking-tab" role="tab" aria-controls="chunking" aria-selected="false">
|
||||
Chunking
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link mb-0 px-0 py-1" data-toggle="tab" href="#embedding-search-tab" role="tab" aria-controls="html-chunking" aria-selected="false">
|
||||
Embedding Search
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link mb-0 px-0 py-1" data-toggle="tab" href="#tuning-tab" role="tab" aria-controls="html-chunking" aria-selected="false">
|
||||
Tuning
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="tab-content tab-space">
|
||||
<!-- Model Information Tab -->
|
||||
<div class="tab-pane fade show active" id="model-info-tab" role="tabpanel">
|
||||
{% set model_fields = ['embedding_model', 'llm_model'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=[], include_fields=model_fields) }}
|
||||
{% endfor %}
|
||||
</div>
|
||||
<!-- License Information Tab -->
|
||||
<div class="tab-pane fade" id="license-info-tab" role="tabpanel">
|
||||
{% set license_fields = ['currency', 'usage_email', ] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=[], include_fields=license_fields) }}
|
||||
{% endfor %}
|
||||
<!-- Register API Key Button -->
|
||||
<button type="button" class="btn btn-primary" onclick="generateNewChatApiKey()">Register Chat API Key</button>
|
||||
<button type="button" class="btn btn-primary" onclick="generateNewApiKey()">Register API Key</button>
|
||||
<!-- API Key Display Field -->
|
||||
<div id="chat-api-key-field" style="display:none;">
|
||||
<label for="chat-api-key">Chat API Key:</label>
|
||||
<input type="text" id="chat-api-key" class="form-control" readonly>
|
||||
<button type="button" id="copy-chat-button" class="btn btn-primary">Copy to Clipboard</button>
|
||||
<p id="copy-chat-message" style="display:none;color:green;">Chat API key copied to clipboard</p>
|
||||
</div>
|
||||
<div id="api-key-field" style="display:none;">
|
||||
<label for="api-key">API Key:</label>
|
||||
<input type="text" id="api-key" class="form-control" readonly>
|
||||
<button type="button" id="copy-api-button" class="btn btn-primary">Copy to Clipboard</button>
|
||||
<p id="copy-message" style="display:none;color:green;">API key copied to clipboard</p>
|
||||
</div>
|
||||
</div>
|
||||
<!-- Chunking Settings Tab -->
|
||||
<div class="tab-pane fade" id="chunking-tab" role="tabpanel">
|
||||
{% set html_fields = ['html_tags', 'html_end_tags', 'html_included_elements', 'html_excluded_elements', 'html_excluded_classes', 'min_chunk_size', 'max_chunk_size'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=[], include_fields=html_fields) }}
|
||||
{% endfor %}
|
||||
</div>
|
||||
<!-- Embedding Search Settings Tab -->
|
||||
<div class="tab-pane fade" id="embedding-search-tab" role="tabpanel">
|
||||
{% set es_fields = ['es_k', 'es_similarity_threshold', ] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=[], include_fields=es_fields) }}
|
||||
{% endfor %}
|
||||
</div>
|
||||
<!-- Tuning Settings Tab -->
|
||||
<div class="tab-pane fade" id="tuning-tab" role="tabpanel">
|
||||
{% set tuning_fields = ['embed_tuning', 'rag_tuning', ] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=[], include_fields=tuning_fields) }}
|
||||
{% endfor %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<button type="submit" class="btn btn-primary">Save Tenant</button>
|
||||
</form>
|
||||
{% endblock %}
|
||||
|
||||
{% block content_footer %} {% endblock %}
|
||||
|
||||
{% block content_footer %}
|
||||
|
||||
{% endblock %}
|
||||
|
||||
{% block scripts %}
|
||||
<script>
|
||||
// Function to generate a new Chat API Key
|
||||
function generateNewChatApiKey() {
|
||||
generateApiKey('/admin/user/generate_chat_api_key', '#chat-api-key', '#chat-api-key-field');
|
||||
}
|
||||
|
||||
// Function to generate a new general API Key
|
||||
function generateNewApiKey() {
|
||||
generateApiKey('/admin/user/generate_api_api_key', '#api-key', '#api-key-field');
|
||||
}
|
||||
|
||||
// Reusable function to handle API key generation
|
||||
function generateApiKey(url, inputSelector, fieldSelector) {
|
||||
$.ajax({
|
||||
url: url,
|
||||
type: 'POST',
|
||||
contentType: 'application/json',
|
||||
success: function(response) {
|
||||
$(inputSelector).val(response.api_key);
|
||||
$(fieldSelector).show();
|
||||
},
|
||||
error: function(error) {
|
||||
alert('Error generating new API key: ' + error.responseText);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Function to copy text to clipboard
|
||||
function copyToClipboard(selector, messageSelector) {
|
||||
const element = document.querySelector(selector);
|
||||
if (element) {
|
||||
const text = element.value;
|
||||
if (navigator.clipboard && navigator.clipboard.writeText) {
|
||||
navigator.clipboard.writeText(text).then(function() {
|
||||
showCopyMessage(messageSelector);
|
||||
}).catch(function(error) {
|
||||
alert('Failed to copy text: ' + error);
|
||||
});
|
||||
} else {
|
||||
fallbackCopyToClipboard(text, messageSelector);
|
||||
}
|
||||
} else {
|
||||
console.error('Element not found for selector:', selector);
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback method for copying text to clipboard
|
||||
function fallbackCopyToClipboard(text, messageSelector) {
|
||||
const textArea = document.createElement('textarea');
|
||||
textArea.value = text;
|
||||
document.body.appendChild(textArea);
|
||||
textArea.focus();
|
||||
textArea.select();
|
||||
try {
|
||||
document.execCommand('copy');
|
||||
showCopyMessage(messageSelector);
|
||||
} catch (err) {
|
||||
alert('Fallback: Oops, unable to copy', err);
|
||||
}
|
||||
document.body.removeChild(textArea);
|
||||
}
|
||||
|
||||
// Function to show copy confirmation message
|
||||
function showCopyMessage(messageSelector) {
|
||||
const message = document.querySelector(messageSelector);
|
||||
if (message) {
|
||||
message.style.display = 'block';
|
||||
setTimeout(function() {
|
||||
message.style.display = 'none';
|
||||
}, 2000);
|
||||
}
|
||||
}
|
||||
|
||||
// Event listeners for copy buttons
|
||||
document.getElementById('copy-chat-button').addEventListener('click', function() {
|
||||
copyToClipboard('#chat-api-key', '#copy-chat-message');
|
||||
});
|
||||
|
||||
document.getElementById('copy-api-button').addEventListener('click', function() {
|
||||
copyToClipboard('#api-key', '#copy-message');
|
||||
});
|
||||
</script>
|
||||
<script>
|
||||
// JavaScript to detect user's timezone
|
||||
document.addEventListener('DOMContentLoaded', (event) => {
|
||||
// Detect timezone
|
||||
const userTimezone = Intl.DateTimeFormat().resolvedOptions().timeZone;
|
||||
|
||||
// Send timezone to the server via a POST request
|
||||
fetch('/set_user_timezone', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({ timezone: userTimezone })
|
||||
}).then(response => {
|
||||
if (response.ok) {
|
||||
console.log('Timezone sent to server successfully');
|
||||
} else {
|
||||
console.error('Failed to send timezone to server');
|
||||
}
|
||||
});
|
||||
});
|
||||
</script>
|
||||
{% endblock %}
|
||||
@@ -10,13 +10,13 @@
|
||||
<form method="post">
|
||||
{{ form.hidden_tag() }}
|
||||
<!-- Main Tenant Information -->
|
||||
{% set main_fields = ['name', 'website', 'default_language', 'allowed_languages'] %}
|
||||
{% set main_fields = ['name', 'website', 'default_language', 'allowed_languages', 'rag_context', 'type'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=main_fields, include_fields=main_fields) }}
|
||||
{% endfor %}
|
||||
|
||||
<!-- Nav Tabs -->
|
||||
<div class="row">
|
||||
<div class="row mt-5">
|
||||
<div class="col-lg-12">
|
||||
<div class="nav-wrapper position-relative end-0">
|
||||
<ul class="nav nav-pills nav-fill p-1" role="tablist">
|
||||
@@ -57,7 +57,7 @@
|
||||
</div>
|
||||
<!-- License Information Tab -->
|
||||
<div class="tab-pane fade" id="license-info-tab" role="tabpanel">
|
||||
{% set license_fields = ['license_start_date', 'license_end_date', 'allowed_monthly_interactions', ] %}
|
||||
{% set license_fields = ['currency', 'usage_email', ] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=license_fields, include_fields=license_fields) }}
|
||||
{% endfor %}
|
||||
@@ -80,7 +80,7 @@
|
||||
</div>
|
||||
<!-- Chunking Settings Tab -->
|
||||
<div class="tab-pane fade" id="chunking-tab" role="tabpanel">
|
||||
{% set html_fields = ['html_tags', 'html_end_tags', 'html_included_elements', 'html_excluded_elements', 'min_chunk_size', 'max_chunk_size'] %}
|
||||
{% set html_fields = ['html_tags', 'html_end_tags', 'html_included_elements', 'html_excluded_elements', 'html_excluded_classes', 'min_chunk_size', 'max_chunk_size'] %}
|
||||
{% for field in form %}
|
||||
{{ render_included_field(field, disabled_fields=html_fields, include_fields=html_fields) }}
|
||||
{% endfor %}
|
||||
|
||||
7
eveai_app/views/administration_forms.py
Normal file
7
eveai_app/views/administration_forms.py
Normal file
@@ -0,0 +1,7 @@
|
||||
from flask import current_app
|
||||
from flask_wtf import FlaskForm
|
||||
from wtforms.fields.simple import SubmitField
|
||||
|
||||
|
||||
class TriggerActionForm(FlaskForm):
|
||||
submit = SubmitField('Submit')
|
||||
39
eveai_app/views/administration_views.py
Normal file
39
eveai_app/views/administration_views.py
Normal file
@@ -0,0 +1,39 @@
|
||||
import uuid
|
||||
from datetime import datetime as dt, timezone as tz
|
||||
from flask import request, redirect, flash, render_template, Blueprint, session, current_app, jsonify
|
||||
from flask_security import hash_password, roles_required, roles_accepted, current_user
|
||||
from itsdangerous import URLSafeTimedSerializer
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
|
||||
from common.utils.celery_utils import current_celery
|
||||
from common.utils.view_assistants import prepare_table_for_macro, form_validation_failed
|
||||
from common.utils.nginx_utils import prefixed_url_for
|
||||
from .administration_forms import TriggerActionForm
|
||||
|
||||
administration_bp = Blueprint('administration_bp', __name__, url_prefix='/administration')
|
||||
|
||||
|
||||
@administration_bp.route('/trigger_actions', methods=['GET'])
|
||||
@roles_accepted('Super User')
|
||||
def trigger_actions():
|
||||
form = TriggerActionForm()
|
||||
return render_template('administration/trigger_actions.html', form=form)
|
||||
|
||||
|
||||
@administration_bp.route('/handle_trigger_action', methods=['POST'])
|
||||
@roles_accepted('Super User')
|
||||
def handle_trigger_action():
|
||||
action = request.form['action']
|
||||
match action:
|
||||
case 'update_usages':
|
||||
try:
|
||||
# Use send_task to trigger the task since it's part of another component (eveai_entitlements)
|
||||
task = current_celery.send_task('update_usages', queue='entitlements')
|
||||
|
||||
current_app.logger.info(f"Usage update task triggered: {task.id}")
|
||||
flash('Usage update task has been triggered successfully!', 'success')
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"Failed to trigger usage update task: {str(e)}")
|
||||
flash(f'Failed to trigger usage update: {str(e)}', 'danger')
|
||||
|
||||
return redirect(prefixed_url_for('administration_bp.trigger_actions'))
|
||||
@@ -4,6 +4,7 @@ from wtforms import (StringField, BooleanField, SubmitField, DateField,
|
||||
SelectField, FieldList, FormField, TextAreaField, URLField)
|
||||
from wtforms.validators import DataRequired, Length, Optional, URL, ValidationError
|
||||
from flask_wtf.file import FileField, FileAllowed, FileRequired
|
||||
import json
|
||||
|
||||
|
||||
def allowed_file(form, field):
|
||||
@@ -14,12 +15,21 @@ def allowed_file(form, field):
|
||||
raise ValidationError('Unsupported file type.')
|
||||
|
||||
|
||||
def validate_json(form, field):
|
||||
if field.data:
|
||||
try:
|
||||
json.loads(field.data)
|
||||
except json.JSONDecodeError:
|
||||
raise ValidationError('Invalid JSON format')
|
||||
|
||||
|
||||
class AddDocumentForm(FlaskForm):
|
||||
file = FileField('File', validators=[FileRequired(), allowed_file])
|
||||
name = StringField('Name', validators=[Length(max=100)])
|
||||
language = SelectField('Language', choices=[], validators=[Optional()])
|
||||
user_context = TextAreaField('User Context', validators=[Optional()])
|
||||
valid_from = DateField('Valid from', id='form-control datepicker', validators=[Optional()])
|
||||
user_metadata = TextAreaField('User Metadata', validators=[Optional(), validate_json])
|
||||
|
||||
submit = SubmitField('Submit')
|
||||
|
||||
@@ -27,7 +37,8 @@ class AddDocumentForm(FlaskForm):
|
||||
super().__init__()
|
||||
self.language.choices = [(language, language) for language in
|
||||
session.get('tenant').get('allowed_languages')]
|
||||
self.language.data = session.get('tenant').get('default_language')
|
||||
if not self.language.data:
|
||||
self.language.data = session.get('tenant').get('default_language')
|
||||
|
||||
|
||||
class AddURLForm(FlaskForm):
|
||||
@@ -36,6 +47,7 @@ class AddURLForm(FlaskForm):
|
||||
language = SelectField('Language', choices=[], validators=[Optional()])
|
||||
user_context = TextAreaField('User Context', validators=[Optional()])
|
||||
valid_from = DateField('Valid from', id='form-control datepicker', validators=[Optional()])
|
||||
user_metadata = TextAreaField('User Metadata', validators=[Optional(), validate_json])
|
||||
|
||||
submit = SubmitField('Submit')
|
||||
|
||||
@@ -43,7 +55,8 @@ class AddURLForm(FlaskForm):
|
||||
super().__init__()
|
||||
self.language.choices = [(language, language) for language in
|
||||
session.get('tenant').get('allowed_languages')]
|
||||
self.language.data = session.get('tenant').get('default_language')
|
||||
if not self.language.data:
|
||||
self.language.data = session.get('tenant').get('default_language')
|
||||
|
||||
|
||||
class AddURLsForm(FlaskForm):
|
||||
@@ -59,22 +72,8 @@ class AddURLsForm(FlaskForm):
|
||||
super().__init__()
|
||||
self.language.choices = [(language, language) for language in
|
||||
session.get('tenant').get('allowed_languages')]
|
||||
self.language.data = session.get('tenant').get('default_language')
|
||||
|
||||
|
||||
class AddYoutubeForm(FlaskForm):
|
||||
url = URLField('Youtube URL', validators=[DataRequired(), URL()])
|
||||
name = StringField('Name', validators=[Length(max=100)])
|
||||
language = SelectField('Language', choices=[], validators=[Optional()])
|
||||
user_context = TextAreaField('User Context', validators=[Optional()])
|
||||
valid_from = DateField('Valid from', id='form-control datepicker', validators=[Optional()])
|
||||
|
||||
submit = SubmitField('Submit')
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.language.choices = [(language, language) for language in
|
||||
session.get('tenant').get('allowed_languages')]
|
||||
if not self.language.data:
|
||||
self.language.data = session.get('tenant').get('default_language')
|
||||
|
||||
|
||||
class EditDocumentForm(FlaskForm):
|
||||
@@ -89,8 +88,7 @@ class EditDocumentVersionForm(FlaskForm):
|
||||
language = StringField('Language')
|
||||
user_context = TextAreaField('User Context', validators=[Optional()])
|
||||
system_context = TextAreaField('System Context', validators=[Optional()])
|
||||
user_metadata = TextAreaField('User Metadata', validators=[Optional(), validate_json])
|
||||
system_metadata = TextAreaField('System Metadata', validators=[Optional(), validate_json])
|
||||
|
||||
submit = SubmitField('Submit')
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1,27 +1,24 @@
|
||||
import ast
|
||||
import os
|
||||
from datetime import datetime as dt, timezone as tz
|
||||
|
||||
import chardet
|
||||
from flask import request, redirect, flash, render_template, Blueprint, session, current_app
|
||||
from flask_security import roles_accepted, current_user
|
||||
from sqlalchemy import desc
|
||||
from sqlalchemy.orm import joinedload
|
||||
from werkzeug.datastructures import FileStorage
|
||||
from werkzeug.utils import secure_filename
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
import requests
|
||||
from requests.exceptions import SSLError
|
||||
from urllib.parse import urlparse, unquote
|
||||
import io
|
||||
from minio.error import S3Error
|
||||
import json
|
||||
|
||||
from common.models.document import Document, DocumentVersion
|
||||
from common.extensions import db, minio_client
|
||||
from common.utils.document_utils import validate_file_type, create_document_stack, start_embedding_task, process_url, \
|
||||
process_multiple_urls, prepare_youtube_document, create_version_for_document, upload_file_for_version
|
||||
process_multiple_urls, get_documents_list, edit_document, \
|
||||
edit_document_version, refresh_document
|
||||
from common.utils.eveai_exceptions import EveAIInvalidLanguageException, EveAIUnsupportedFileType, \
|
||||
EveAIDoubleURLException, EveAIYoutubeError
|
||||
EveAIDoubleURLException
|
||||
from .document_forms import AddDocumentForm, AddURLForm, EditDocumentForm, EditDocumentVersionForm, AddURLsForm
|
||||
from common.utils.middleware import mw_before_request
|
||||
from common.utils.celery_utils import current_celery
|
||||
@@ -59,11 +56,9 @@ def before_request():
|
||||
@roles_accepted('Super User', 'Tenant Admin')
|
||||
def add_document():
|
||||
form = AddDocumentForm()
|
||||
current_app.logger.debug('Adding document')
|
||||
|
||||
if form.validate_on_submit():
|
||||
try:
|
||||
current_app.logger.debug('Validating file type')
|
||||
tenant_id = session['tenant']['id']
|
||||
file = form.file.data
|
||||
filename = secure_filename(file.filename)
|
||||
@@ -71,12 +66,15 @@ def add_document():
|
||||
|
||||
validate_file_type(extension)
|
||||
|
||||
current_app.logger.debug(f'Language on form: {form.language.data}')
|
||||
api_input = {
|
||||
'name': form.name.data,
|
||||
'language': form.language.data,
|
||||
'user_context': form.user_context.data,
|
||||
'valid_from': form.valid_from.data
|
||||
'valid_from': form.valid_from.data,
|
||||
'user_metadata': json.loads(form.user_metadata.data) if form.user_metadata.data else None,
|
||||
}
|
||||
current_app.logger.debug(f'Creating document stack with input {api_input}')
|
||||
|
||||
new_doc, new_doc_vers = create_document_stack(api_input, file, filename, extension, tenant_id)
|
||||
task_id = start_embedding_task(tenant_id, new_doc_vers.id)
|
||||
@@ -111,7 +109,8 @@ def add_url():
|
||||
'url': url,
|
||||
'language': form.language.data,
|
||||
'user_context': form.user_context.data,
|
||||
'valid_from': form.valid_from.data
|
||||
'valid_from': form.valid_from.data,
|
||||
'user_metadata': json.loads(form.user_metadata.data) if form.user_metadata.data else None,
|
||||
}
|
||||
|
||||
new_doc, new_doc_vers = create_document_stack(api_input, file_content, filename, extension, tenant_id)
|
||||
@@ -175,9 +174,7 @@ def documents():
|
||||
page = request.args.get('page', 1, type=int)
|
||||
per_page = request.args.get('per_page', 10, type=int)
|
||||
|
||||
query = Document.query.order_by(desc(Document.created_at))
|
||||
|
||||
pagination = query.paginate(page=page, per_page=per_page, error_out=False)
|
||||
pagination = get_documents_list(page, per_page)
|
||||
docs = pagination.items
|
||||
|
||||
rows = prepare_table_for_macro(docs, [('id', ''), ('name', ''), ('valid_from', ''), ('valid_to', '')])
|
||||
@@ -195,11 +192,11 @@ def handle_document_selection():
|
||||
|
||||
match action:
|
||||
case 'edit_document':
|
||||
return redirect(prefixed_url_for('document_bp.edit_document', document_id=doc_id))
|
||||
return redirect(prefixed_url_for('document_bp.edit_document_view', document_id=doc_id))
|
||||
case 'document_versions':
|
||||
return redirect(prefixed_url_for('document_bp.document_versions', document_id=doc_id))
|
||||
case 'refresh_document':
|
||||
refresh_document(doc_id)
|
||||
refresh_document_view(doc_id)
|
||||
return redirect(prefixed_url_for('document_bp.document_versions', document_id=doc_id))
|
||||
case 're_embed_latest_versions':
|
||||
re_embed_latest_versions()
|
||||
@@ -210,25 +207,22 @@ def handle_document_selection():
|
||||
|
||||
@document_bp.route('/edit_document/<int:document_id>', methods=['GET', 'POST'])
|
||||
@roles_accepted('Super User', 'Tenant Admin')
|
||||
def edit_document(document_id):
|
||||
def edit_document_view(document_id):
|
||||
doc = Document.query.get_or_404(document_id)
|
||||
form = EditDocumentForm(obj=doc)
|
||||
|
||||
if form.validate_on_submit():
|
||||
doc.name = form.name.data
|
||||
doc.valid_from = form.valid_from.data
|
||||
doc.valid_to = form.valid_to.data
|
||||
|
||||
update_logging_information(doc, dt.now(tz.utc))
|
||||
|
||||
try:
|
||||
db.session.add(doc)
|
||||
db.session.commit()
|
||||
flash(f'Document {doc.id} updated successfully', 'success')
|
||||
except SQLAlchemyError as e:
|
||||
db.session.rollback()
|
||||
flash(f'Error updating document: {e}', 'danger')
|
||||
current_app.logger.error(f'Error updating document: {e}')
|
||||
updated_doc, error = edit_document(
|
||||
document_id,
|
||||
form.name.data,
|
||||
form.valid_from.data,
|
||||
form.valid_to.data
|
||||
)
|
||||
if updated_doc:
|
||||
flash(f'Document {updated_doc.id} updated successfully', 'success')
|
||||
return redirect(prefixed_url_for('document_bp.documents'))
|
||||
else:
|
||||
flash(f'Error updating document: {error}', 'danger')
|
||||
else:
|
||||
form_validation_failed(request, form)
|
||||
|
||||
@@ -237,24 +231,20 @@ def edit_document(document_id):
|
||||
|
||||
@document_bp.route('/edit_document_version/<int:document_version_id>', methods=['GET', 'POST'])
|
||||
@roles_accepted('Super User', 'Tenant Admin')
|
||||
def edit_document_version(document_version_id):
|
||||
def edit_document_version_view(document_version_id):
|
||||
doc_vers = DocumentVersion.query.get_or_404(document_version_id)
|
||||
form = EditDocumentVersionForm(obj=doc_vers)
|
||||
|
||||
if form.validate_on_submit():
|
||||
doc_vers.user_context = form.user_context.data
|
||||
|
||||
update_logging_information(doc_vers, dt.now(tz.utc))
|
||||
|
||||
try:
|
||||
db.session.add(doc_vers)
|
||||
db.session.commit()
|
||||
flash(f'Document Version {doc_vers.id} updated successfully', 'success')
|
||||
except SQLAlchemyError as e:
|
||||
db.session.rollback()
|
||||
flash(f'Error updating document version: {e}', 'danger')
|
||||
current_app.logger.error(f'Error updating document version {doc_vers.id} '
|
||||
f'for tenant {session['tenant']['id']}: {e}')
|
||||
updated_version, error = edit_document_version(
|
||||
document_version_id,
|
||||
form.user_context.data
|
||||
)
|
||||
if updated_version:
|
||||
flash(f'Document Version {updated_version.id} updated successfully', 'success')
|
||||
return redirect(prefixed_url_for('document_bp.document_versions', document_id=updated_version.doc_id))
|
||||
else:
|
||||
flash(f'Error updating document version: {error}', 'danger')
|
||||
else:
|
||||
form_validation_failed(request, form)
|
||||
|
||||
@@ -265,8 +255,8 @@ def edit_document_version(document_version_id):
|
||||
@document_bp.route('/document_versions/<int:document_id>', methods=['GET', 'POST'])
|
||||
@roles_accepted('Super User', 'Tenant Admin')
|
||||
def document_versions(document_id):
|
||||
doc_vers = DocumentVersion.query.get_or_404(document_id)
|
||||
doc_desc = f'Document {doc_vers.document.name}, Language {doc_vers.language}'
|
||||
doc = Document.query.get_or_404(document_id)
|
||||
doc_desc = f'Document {doc.name}'
|
||||
|
||||
page = request.args.get('page', 1, type=int)
|
||||
per_page = request.args.get('per_page', 10, type=int)
|
||||
@@ -278,8 +268,8 @@ def document_versions(document_id):
|
||||
pagination = query.paginate(page=page, per_page=per_page, error_out=False)
|
||||
doc_langs = pagination.items
|
||||
|
||||
rows = prepare_table_for_macro(doc_langs, [('id', ''), ('url', ''), ('file_location', ''),
|
||||
('file_name', ''), ('file_type', ''),
|
||||
rows = prepare_table_for_macro(doc_langs, [('id', ''), ('url', ''),
|
||||
('object_name', ''), ('file_type', ''),
|
||||
('processing', ''), ('processing_started_at', ''),
|
||||
('processing_finished_at', ''), ('processing_error', '')])
|
||||
|
||||
@@ -298,7 +288,7 @@ def handle_document_version_selection():
|
||||
|
||||
match action:
|
||||
case 'edit_document_version':
|
||||
return redirect(prefixed_url_for('document_bp.edit_document_version', document_version_id=doc_vers_id))
|
||||
return redirect(prefixed_url_for('document_bp.edit_document_version_view', document_version_id=doc_vers_id))
|
||||
case 'process_document_version':
|
||||
process_version(doc_vers_id)
|
||||
# Add more conditions for other actions
|
||||
@@ -341,55 +331,13 @@ def refresh_all_documents():
|
||||
refresh_document(doc.id)
|
||||
|
||||
|
||||
def refresh_document(doc_id):
|
||||
doc = Document.query.get_or_404(doc_id)
|
||||
doc_vers = DocumentVersion.query.filter_by(doc_id=doc_id).order_by(desc(DocumentVersion.id)).first()
|
||||
if not doc_vers.url:
|
||||
current_app.logger.info(f'Document {doc_id} has no URL, skipping refresh')
|
||||
flash(f'This document has no URL. I can only refresh documents with a URL. skipping refresh', 'alert')
|
||||
return
|
||||
|
||||
new_doc_vers = create_version_for_document(doc, doc_vers.url, doc_vers.language, doc_vers.user_context)
|
||||
|
||||
try:
|
||||
db.session.add(new_doc_vers)
|
||||
db.session.commit()
|
||||
except SQLAlchemyError as e:
|
||||
current_app.logger.error(f'Error refreshing document {doc_id} for tenant {session["tenant"]["id"]}: {e}')
|
||||
flash('Error refreshing document.', 'alert')
|
||||
db.session.rollback()
|
||||
error = e.args
|
||||
raise
|
||||
except Exception as e:
|
||||
current_app.logger.error('Unknown error')
|
||||
raise
|
||||
|
||||
html = fetch_html(new_doc_vers.url)
|
||||
file = io.BytesIO(html)
|
||||
|
||||
parsed_url = urlparse(new_doc_vers.url)
|
||||
path_parts = parsed_url.path.split('/')
|
||||
filename = path_parts[-1]
|
||||
if filename == '':
|
||||
filename = 'index'
|
||||
if not filename.endswith('.html'):
|
||||
filename += '.html'
|
||||
extension = 'html'
|
||||
|
||||
current_app.logger.info(f'Document added successfully for tenant {session["tenant"]["id"]}, '
|
||||
f'Document Version {new_doc_vers.id}')
|
||||
|
||||
upload_file_for_version(new_doc_vers, file, extension, session["tenant"]["id"])
|
||||
|
||||
task = current_celery.send_task('create_embeddings', queue='embeddings', args=[
|
||||
session['tenant']['id'],
|
||||
new_doc_vers.id,
|
||||
])
|
||||
current_app.logger.info(f'Embedding creation started for tenant {session["tenant"]["id"]}, '
|
||||
f'Document Version {new_doc_vers.id}. '
|
||||
f'Embedding creation task: {task.id}')
|
||||
flash(f'Processing on document {doc.name}, version {new_doc_vers.id} started. Task ID: {task.id}.',
|
||||
'success')
|
||||
def refresh_document_view(document_id):
|
||||
new_version, result = refresh_document(document_id)
|
||||
if new_version:
|
||||
flash(f'Document refreshed. New version: {new_version.id}. Task ID: {result}', 'success')
|
||||
else:
|
||||
flash(f'Error refreshing document: {result}', 'danger')
|
||||
return redirect(prefixed_url_for('document_bp.documents'))
|
||||
|
||||
|
||||
def re_embed_latest_versions():
|
||||
@@ -401,10 +349,9 @@ def re_embed_latest_versions():
|
||||
|
||||
|
||||
def process_version(version_id):
|
||||
task = current_celery.send_task('create_embeddings', queue='embeddings', args=[
|
||||
session['tenant']['id'],
|
||||
version_id,
|
||||
])
|
||||
task = current_celery.send_task('create_embeddings',
|
||||
args=[session['tenant']['id'], version_id,],
|
||||
queue='embeddings')
|
||||
current_app.logger.info(f'Embedding creation retriggered by user {current_user.id}, {current_user.email} '
|
||||
f'for tenant {session["tenant"]["id"]}, '
|
||||
f'Document Version {version_id}. '
|
||||
|
||||
76
eveai_app/views/entitlements_forms.py
Normal file
76
eveai_app/views/entitlements_forms.py
Normal file
@@ -0,0 +1,76 @@
|
||||
from flask import current_app
|
||||
from flask_wtf import FlaskForm
|
||||
from wtforms import (StringField, PasswordField, BooleanField, SubmitField, EmailField, IntegerField, DateField,
|
||||
SelectField, SelectMultipleField, FieldList, FormField, FloatField, TextAreaField)
|
||||
from wtforms.validators import DataRequired, Length, Email, NumberRange, Optional, ValidationError, InputRequired
|
||||
import pytz
|
||||
|
||||
|
||||
class LicenseTierForm(FlaskForm):
    """Form for creating and editing a LicenseTier.

    Monetary fields exist in both dollar (_d) and euro (_e) variants; the
    tenant's currency decides which column is used when a License is created
    from this tier. Fields whose legitimate value may be 0 use InputRequired
    instead of DataRequired, because DataRequired rejects falsy values
    (0, 0.0) as "missing".
    """
    name = StringField('Name', validators=[DataRequired(), Length(max=50)])
    version = StringField('Version', validators=[DataRequired(), Length(max=50)])
    start_date = DateField('Start Date', id='form-control datepicker', validators=[DataRequired()])
    end_date = DateField('End Date', id='form-control datepicker', validators=[Optional()])
    basic_fee_d = FloatField('Basic Fee ($)', validators=[InputRequired(), NumberRange(min=0)])
    basic_fee_e = FloatField('Basic Fee (€)', validators=[InputRequired(), NumberRange(min=0)])
    max_storage_mb = IntegerField('Max Storage (MiB)', validators=[DataRequired(), NumberRange(min=1)])
    additional_storage_price_d = FloatField('Additional Storage Fee ($)',
                                            validators=[InputRequired(), NumberRange(min=0)])
    additional_storage_price_e = FloatField('Additional Storage Fee (€)',
                                            validators=[InputRequired(), NumberRange(min=0)])
    additional_storage_bucket = IntegerField('Additional Storage Bucket Size (MiB)',
                                             validators=[DataRequired(), NumberRange(min=1)])
    included_embedding_mb = IntegerField('Included Embeddings (MiB)',
                                         validators=[DataRequired(), NumberRange(min=1)])
    additional_embedding_price_d = FloatField('Additional Embedding Fee ($)',
                                              validators=[InputRequired(), NumberRange(min=0)])
    additional_embedding_price_e = FloatField('Additional Embedding Fee (€)',
                                              validators=[InputRequired(), NumberRange(min=0)])
    additional_embedding_bucket = IntegerField('Additional Embedding Bucket Size (MiB)',
                                               validators=[DataRequired(), NumberRange(min=1)])
    included_interaction_tokens = IntegerField('Included Embedding Tokens',
                                               validators=[DataRequired(), NumberRange(min=1)])
    additional_interaction_token_price_d = FloatField('Additional Interaction Token Fee ($)',
                                                      validators=[InputRequired(), NumberRange(min=0)])
    additional_interaction_token_price_e = FloatField('Additional Interaction Token Fee (€)',
                                                      validators=[InputRequired(), NumberRange(min=0)])
    additional_interaction_bucket = IntegerField('Additional Interaction Bucket Size',
                                                 validators=[DataRequired(), NumberRange(min=1)])
    # BUG FIX: was DataRequired, which rejects the valid default value of 0;
    # InputRequired only checks that *some* input was submitted.
    standard_overage_embedding = FloatField('Standard Overage Embedding (%)',
                                            validators=[InputRequired(), NumberRange(min=0)],
                                            default=0)
    standard_overage_interaction = FloatField('Standard Overage Interaction (%)',
                                              validators=[InputRequired(), NumberRange(min=0)],
                                              default=0)
|
||||
|
||||
|
||||
class LicenseForm(FlaskForm):
    """Form for creating and editing a tenant License.

    Prices here are single-currency (the currency is fixed by the tenant and
    re-applied server-side, since disabled fields are not posted). Fee fields
    use InputRequired so a legitimate value of 0 validates; yearly_payment is
    Optional because an unchecked checkbox posts no data at all and
    DataRequired would therefore always fail it.
    """
    start_date = DateField('Start Date', id='form-control datepicker', validators=[DataRequired()])
    end_date = DateField('End Date', id='form-control datepicker', validators=[DataRequired()])
    currency = StringField('Currency', validators=[Optional(), Length(max=20)])
    # BUG FIX: DataRequired on a BooleanField effectively forces the box to
    # be checked (False never validates); Optional allows both states.
    yearly_payment = BooleanField('Yearly Payment', validators=[Optional()], default=False)
    basic_fee = FloatField('Basic Fee', validators=[InputRequired(), NumberRange(min=0)])
    max_storage_mb = IntegerField('Max Storage (MiB)', validators=[DataRequired(), NumberRange(min=1)])
    additional_storage_price = FloatField('Additional Storage Token Fee',
                                          validators=[InputRequired(), NumberRange(min=0)])
    additional_storage_bucket = IntegerField('Additional Storage Bucket Size (MiB)',
                                             validators=[DataRequired(), NumberRange(min=1)])
    included_embedding_mb = IntegerField('Included Embedding Tokens (MiB)',
                                         validators=[DataRequired(), NumberRange(min=1)])
    additional_embedding_price = FloatField('Additional Embedding Token Fee',
                                            validators=[InputRequired(), NumberRange(min=0)])
    additional_embedding_bucket = IntegerField('Additional Embedding Bucket Size (MiB)',
                                               validators=[DataRequired(), NumberRange(min=1)])
    included_interaction_tokens = IntegerField('Included Interaction Tokens',
                                               validators=[DataRequired(), NumberRange(min=1)])
    additional_interaction_token_price = FloatField('Additional Interaction Token Fee',
                                                    validators=[InputRequired(), NumberRange(min=0)])
    additional_interaction_bucket = IntegerField('Additional Interaction Bucket Size',
                                                 validators=[DataRequired(), NumberRange(min=1)])
    # BUG FIX: was DataRequired, which rejects the valid default value of 0.
    overage_embedding = FloatField('Overage Embedding (%)',
                                   validators=[InputRequired(), NumberRange(min=0)],
                                   default=0)
    overage_interaction = FloatField('Overage Interaction (%)',
                                     validators=[InputRequired(), NumberRange(min=0)],
                                     default=0)
|
||||
|
||||
235
eveai_app/views/entitlements_views.py
Normal file
235
eveai_app/views/entitlements_views.py
Normal file
@@ -0,0 +1,235 @@
|
||||
import uuid
|
||||
from datetime import datetime as dt, timezone as tz
|
||||
from flask import request, redirect, flash, render_template, Blueprint, session, current_app, jsonify
|
||||
from flask_security import hash_password, roles_required, roles_accepted, current_user
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
from sqlalchemy import or_, desc
|
||||
import ast
|
||||
|
||||
from common.models.entitlements import License, LicenseTier, LicenseUsage, BusinessEventLog
|
||||
from common.extensions import db, security, minio_client, simple_encryption
|
||||
from .entitlements_forms import LicenseTierForm, LicenseForm
|
||||
from common.utils.view_assistants import prepare_table_for_macro, form_validation_failed
|
||||
from common.utils.nginx_utils import prefixed_url_for
|
||||
|
||||
entitlements_bp = Blueprint('entitlements_bp', __name__, url_prefix='/entitlements')
|
||||
|
||||
|
||||
@entitlements_bp.route('/license_tier', methods=['GET', 'POST'])
@roles_accepted('Super User')
def license_tier():
    """Create a new LicenseTier (Super User only).

    GET renders an empty form; POST validates it, persists the tier and
    redirects to the tier overview. On a database error the transaction is
    rolled back and the form is re-rendered with an error flash.
    """
    form = LicenseTierForm()
    if form.validate_on_submit():
        current_app.logger.info("Adding License Tier")

        new_license_tier = LicenseTier()
        form.populate_obj(new_license_tier)

        try:
            db.session.add(new_license_tier)
            db.session.commit()
        except SQLAlchemyError as e:
            db.session.rollback()
            current_app.logger.error(f'Failed to add license tier to database. Error: {str(e)}')
            # BUG FIX: category was 'success' for a failure message.
            flash(f'Failed to add license tier to database. Error: {str(e)}', 'danger')
            return render_template('entitlements/license_tier.html', form=form)

        current_app.logger.info(f"Successfully created license tier {new_license_tier.id}")
        flash(f"Successfully created tenant license tier {new_license_tier.id}")

        return redirect(prefixed_url_for('entitlements_bp.view_license_tiers'))
    else:
        form_validation_failed(request, form)

    return render_template('entitlements/license_tier.html', form=form)
|
||||
|
||||
|
||||
@entitlements_bp.route('/view_license_tiers', methods=['GET', 'POST'])
@roles_required('Super User')
def view_license_tiers():
    """Paginated overview of license tiers that have not yet expired."""
    page = request.args.get('page', 1, type=int)
    per_page = request.args.get('per_page', 10, type=int)
    now = dt.now(tz.utc)

    # A tier is listed while it has no end date or its end date is still in the future.
    active_filter = or_(LicenseTier.end_date == None, LicenseTier.end_date >= now)
    tier_query = (LicenseTier.query
                  .filter(active_filter)
                  .order_by(LicenseTier.start_date.desc(), LicenseTier.id))

    pagination = tier_query.paginate(page=page, per_page=per_page, error_out=False)

    rows = prepare_table_for_macro(
        pagination.items,
        [('id', ''), ('name', ''), ('version', ''), ('start_date', ''), ('end_date', '')],
    )

    return render_template('entitlements/view_license_tiers.html', rows=rows, pagination=pagination)
|
||||
|
||||
|
||||
@entitlements_bp.route('/handle_license_tier_selection', methods=['POST'])
|
||||
@roles_required('Super User')
|
||||
def handle_license_tier_selection():
|
||||
license_tier_identification = request.form['selected_row']
|
||||
license_tier_id = ast.literal_eval(license_tier_identification).get('value')
|
||||
the_license_tier = LicenseTier.query.get(license_tier_id)
|
||||
|
||||
action = request.form['action']
|
||||
|
||||
match action:
|
||||
case 'edit_license_tier':
|
||||
return redirect(prefixed_url_for('entitlements_bp.edit_license_tier',
|
||||
license_tier_id=license_tier_id))
|
||||
case 'create_license_for_tenant':
|
||||
return redirect(prefixed_url_for('entitlements_bp.create_license',
|
||||
license_tier_id=license_tier_id))
|
||||
# Add more conditions for other actions
|
||||
return redirect(prefixed_url_for('entitlements_bp.view_license_tiers'))
|
||||
|
||||
|
||||
@entitlements_bp.route('/license_tier/<int:license_tier_id>', methods=['GET', 'POST'])
@roles_accepted('Super User')
def edit_license_tier(license_tier_id):
    """Edit an existing LicenseTier; responds 404 when the id is unknown."""
    license_tier = LicenseTier.query.get_or_404(license_tier_id)  # 404 if no license tier is found
    form = LicenseTierForm(obj=license_tier)

    if not form.validate_on_submit():
        # GET requests and invalid submissions both land here.
        form_validation_failed(request, form)
        return render_template('entitlements/license_tier.html', form=form, license_tier_id=license_tier.id)

    # Copy the submitted form data onto the tier and persist it.
    form.populate_obj(license_tier)
    try:
        db.session.add(license_tier)
        db.session.commit()
    except SQLAlchemyError as e:
        db.session.rollback()
        current_app.logger.error(f'Failed to edit License Tier. Error: {str(e)}')
        flash(f'Failed to edit License Tier. Error: {str(e)}', 'danger')
        return render_template('entitlements/license_tier.html', form=form, license_tier_id=license_tier.id)

    flash('License Tier updated successfully.', 'success')
    return redirect(
        prefixed_url_for('entitlements_bp.edit_license_tier', license_tier_id=license_tier_id))
|
||||
|
||||
|
||||
@entitlements_bp.route('/create_license/<int:license_tier_id>', methods=['GET', 'POST'])
@roles_accepted('Super User')
def create_license(license_tier_id):
    """Create a License for the current tenant based on a LicenseTier.

    GET prefills the form from the tier, choosing the dollar or euro price
    columns according to the tenant's currency; an unknown currency aborts
    and redirects to the tenant edit page. POST validates the form, persists
    the new license and redirects to its edit page.
    """
    form = LicenseForm()
    tenant_id = session.get('tenant').get('id')
    currency = session.get('tenant').get('currency')

    if request.method == 'GET':
        # Fetch the LicenseTier (404 when the id is unknown)
        license_tier = LicenseTier.query.get_or_404(license_tier_id)

        # Prefill the form with LicenseTier data.
        # Currency dependent data: each price exists in a $ (_d) and a € (_e) column.
        if currency == '$':
            form.basic_fee.data = license_tier.basic_fee_d
            form.additional_storage_price.data = license_tier.additional_storage_price_d
            form.additional_embedding_price.data = license_tier.additional_embedding_price_d
            form.additional_interaction_token_price.data = license_tier.additional_interaction_token_price_d
        elif currency == '€':
            form.basic_fee.data = license_tier.basic_fee_e
            form.additional_storage_price.data = license_tier.additional_storage_price_e
            form.additional_embedding_price.data = license_tier.additional_embedding_price_e
            form.additional_interaction_token_price.data = license_tier.additional_interaction_token_price_e
        else:
            current_app.logger.error(f'Invalid currency {currency} for tenant {tenant_id} while creating license.')
            flash(f"Invalid currency {currency} for tenant {tenant_id} while creating license. "
                  f"Check tenant's currency and try again.", 'danger')
            return redirect(prefixed_url_for('user_bp.edit_tenant', tenant_id=tenant_id))
        # General (currency independent) data
        form.currency.data = currency
        form.max_storage_mb.data = license_tier.max_storage_mb
        form.additional_storage_bucket.data = license_tier.additional_storage_bucket
        form.included_embedding_mb.data = license_tier.included_embedding_mb
        form.additional_embedding_bucket.data = license_tier.additional_embedding_bucket
        form.included_interaction_tokens.data = license_tier.included_interaction_tokens
        form.additional_interaction_bucket.data = license_tier.additional_interaction_bucket
        form.overage_embedding.data = license_tier.standard_overage_embedding
        form.overage_interaction.data = license_tier.standard_overage_interaction
    else:  # POST
        # Create a new License instance bound to the current tenant and tier
        new_license = License(
            tenant_id=tenant_id,
            tier_id=license_tier_id,
        )
        current_app.logger.debug(f"Currency data in form: {form.currency.data}")
        if form.validate_on_submit():
            # Update the license with form data
            form.populate_obj(new_license)
            # Currency is added here again, as a form doesn't include disabled fields when passing it in the request
            new_license.currency = currency

            try:
                db.session.add(new_license)
                db.session.commit()
                flash('License created successfully', 'success')
                return redirect(prefixed_url_for('entitlements_bp.edit_license', license_id=new_license.id))
            except Exception as e:
                db.session.rollback()
                # BUG FIX: category was 'error'; the rest of the app uses the
                # Bootstrap alert category 'danger' for failures.
                flash(f'Error creating license: {str(e)}', 'danger')
        else:
            form_validation_failed(request, form)

    return render_template('entitlements/license.html', form=form, ext_disabled_fields=[])
|
||||
|
||||
|
||||
@entitlements_bp.route('/license/<int:license_id>', methods=['GET', 'POST'])
@roles_accepted('Super User')
def edit_license(license_id):
    """Edit an existing License; responds 404 when the id is unknown.

    Once usage records exist for the license, every field except end_date is
    disabled in the template so that already-billed data cannot change.
    """
    the_license = License.query.get_or_404(license_id)  # 404 if no license is found
    form = LicenseForm(obj=the_license)
    disabled_fields = []
    if len(the_license.usages) > 0:  # There already are usage records linked to this license
        # Only the end date may still be edited.
        disabled_fields = [field.name for field in form if field.name != 'end_date']

    if form.validate_on_submit():
        # Populate the license with form data
        form.populate_obj(the_license)

        try:
            db.session.add(the_license)
            db.session.commit()
        except SQLAlchemyError as e:
            db.session.rollback()
            current_app.logger.error(f'Failed to edit License. Error: {str(e)}')
            flash(f'Failed to edit License. Error: {str(e)}', 'danger')
            # BUG FIX: also pass ext_disabled_fields here, consistent with the
            # final render below.
            return render_template('entitlements/license.html', form=form,
                                   ext_disabled_fields=disabled_fields)

        flash('License updated successfully.', 'success')
        # BUG FIX: the edit_license endpoint takes license_id, not
        # license_tier_id (the old kwarg raised a BuildError).
        return redirect(
            prefixed_url_for('entitlements_bp.edit_license', license_id=license_id))
    else:
        form_validation_failed(request, form)

    # BUG FIX: previously referenced the undefined name license_tier here,
    # which raised a NameError on every GET request.
    return render_template('entitlements/license.html', form=form,
                           ext_disabled_fields=disabled_fields)
|
||||
|
||||
|
||||
@entitlements_bp.route('/view_usages')
@roles_accepted('Super User', 'Tenant Admin')
def view_usages():
    """Paginated list of license usage records for the current tenant."""
    page = request.args.get('page', 1, type=int)
    per_page = request.args.get('per_page', 10, type=int)

    tenant_id = session.get('tenant').get('id')
    usage_query = (LicenseUsage.query
                   .filter_by(tenant_id=tenant_id)
                   .order_by(desc(LicenseUsage.id)))

    pagination = usage_query.paginate(page=page, per_page=per_page)

    # prepare table data for the shared table macro
    columns = [('id', ''), ('period_start_date', ''), ('period_end_date', ''),
               ('storage_mb_used', ''), ('embedding_mb_used', ''),
               ('interaction_total_tokens_used', '')]
    rows = prepare_table_for_macro(pagination.items, columns)

    # Render the usage records in a template
    return render_template('entitlements/view_usages.html', rows=rows, pagination=pagination)
|
||||
100
eveai_app/views/healthz_views.py
Normal file
100
eveai_app/views/healthz_views.py
Normal file
@@ -0,0 +1,100 @@
|
||||
from flask import Blueprint, current_app, request
|
||||
from flask_healthz import HealthError
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
from celery.exceptions import TimeoutError as CeleryTimeoutError
|
||||
from prometheus_client import Counter, Histogram, generate_latest, CONTENT_TYPE_LATEST
|
||||
import time
|
||||
|
||||
from common.extensions import db, metrics, minio_client
|
||||
from common.utils.celery_utils import current_celery
|
||||
|
||||
healthz_bp = Blueprint('healthz', __name__, url_prefix='/_healthz')
|
||||
|
||||
# Define Prometheus metrics
|
||||
api_request_counter = Counter('api_request_count', 'API Request Count', ['method', 'endpoint'])
|
||||
api_request_latency = Histogram('api_request_latency_seconds', 'API Request latency')
|
||||
|
||||
|
||||
def liveness():
    """Liveness probe: succeeds whenever the process can execute code.

    flask_healthz treats a normal return as healthy and a HealthError as
    failure; the previous try/except here was dead code because a bare
    `return True` cannot raise.
    """
    return True
|
||||
|
||||
|
||||
def readiness():
    """Readiness probe: verify all critical dependencies are reachable.

    Raises:
        HealthError: when at least one dependency check fails; the message
            names the failing checks so operators can see what is down.
    """
    checks = {
        "database": check_database(),
        "celery": check_celery(),
        "minio": check_minio(),
        # Add more checks as needed
    }

    failed = [name for name, ok in checks.items() if not ok]
    if failed:
        # Include the failing check names instead of a generic message.
        raise HealthError(f"Readiness check failed: {', '.join(failed)}")
|
||||
|
||||
|
||||
def check_database():
    """Return True when a trivial query round-trips to the database."""
    try:
        # SQLAlchemy 2.x requires textual SQL to be wrapped in text();
        # a raw string here raises ArgumentError instead of executing.
        from sqlalchemy import text
        db.session.execute(text("SELECT 1"))
        return True
    except SQLAlchemyError:
        current_app.logger.error("Database check failed", exc_info=True)
        return False
|
||||
|
||||
|
||||
def check_celery():
    """Return True when a Celery worker answers the 'ping' task with 'pong'."""
    try:
        # Round-trip a trivial task through the embeddings queue.
        ping_result = current_celery.send_task('ping', queue='embeddings')
        reply = ping_result.get(timeout=10)  # Wait for up to 10 seconds for a response
        return reply == 'pong'
    except CeleryTimeoutError:
        current_app.logger.error("Celery check timed out", exc_info=True)
        return False
    except Exception as e:
        current_app.logger.error(f"Celery check failed: {str(e)}", exc_info=True)
        return False
|
||||
|
||||
|
||||
def check_minio():
    """Return True when the MinIO endpoint responds to a bucket listing."""
    try:
        # Listing buckets is a cheap call that proves connectivity and auth.
        minio_client.list_buckets()
    except Exception as e:
        current_app.logger.error(f"MinIO check failed: {str(e)}", exc_info=True)
        return False
    return True
|
||||
|
||||
|
||||
@healthz_bp.route('/metrics')
@metrics.do_not_track()  # keep the exporter from instrumenting its own scrape endpoint
def prometheus_metrics():
    # Expose all registered Prometheus metrics in the text exposition format.
    return generate_latest(), 200, {'Content-Type': CONTENT_TYPE_LATEST}
|
||||
|
||||
|
||||
# Custom metrics example
@healthz_bp.before_app_request
def before_request():
    """Stamp the request start time and count the request for Prometheus."""
    request.start_time = time.time()
    counter = api_request_counter.labels(method=request.method, endpoint=request.endpoint)
    counter.inc()
|
||||
|
||||
|
||||
@healthz_bp.after_app_request
def after_request(response):
    """Record the request's latency; returns the response unchanged."""
    # Guard: start_time is only set by before_request. Tolerate requests
    # that bypassed it instead of raising AttributeError here.
    start = getattr(request, 'start_time', None)
    if start is not None:
        api_request_latency.observe(time.time() - start)
    return response
|
||||
|
||||
|
||||
def init_healtz(app):
    # Registers the flask_healthz endpoints by dotted path.
    # NOTE(review): the name looks like a typo for init_healthz; kept as-is
    # because callers elsewhere may import this exact name.
    # NOTE(review): the dotted paths reference module 'healthz_views' —
    # confirm this resolves correctly from where flask_healthz imports it.
    app.config.update(
        HEALTHZ={
            "live": "healthz_views.liveness",
            "ready": "healthz_views.readiness",
        }
    )
|
||||
@@ -2,7 +2,7 @@ from flask import current_app
|
||||
from flask_wtf import FlaskForm
|
||||
from wtforms import (StringField, PasswordField, BooleanField, SubmitField, EmailField, IntegerField, DateField,
|
||||
SelectField, SelectMultipleField, FieldList, FormField, FloatField, TextAreaField)
|
||||
from wtforms.validators import DataRequired, Length, Email, NumberRange, Optional
|
||||
from wtforms.validators import DataRequired, Length, Email, NumberRange, Optional, ValidationError
|
||||
import pytz
|
||||
|
||||
from common.models.user import Role
|
||||
@@ -14,17 +14,18 @@ class TenantForm(FlaskForm):
|
||||
# language fields
|
||||
default_language = SelectField('Default Language', choices=[], validators=[DataRequired()])
|
||||
allowed_languages = SelectMultipleField('Allowed Languages', choices=[], validators=[DataRequired()])
|
||||
# invoicing fields
|
||||
currency = SelectField('Currency', choices=[], validators=[DataRequired()])
|
||||
usage_email = EmailField('Usage Email', validators=[DataRequired(), Email()])
|
||||
# Timezone
|
||||
timezone = SelectField('Timezone', choices=[], validators=[DataRequired()])
|
||||
# RAG context
|
||||
rag_context = TextAreaField('RAG Context', validators=[Optional()])
|
||||
# Tenant Type
|
||||
type = SelectField('Tenant Type', validators=[Optional()], default='Active')
|
||||
# LLM fields
|
||||
embedding_model = SelectField('Embedding Model', choices=[], validators=[DataRequired()])
|
||||
llm_model = SelectField('Large Language Model', choices=[], validators=[DataRequired()])
|
||||
# license fields
|
||||
license_start_date = DateField('License Start Date', id='form-control datepicker', validators=[Optional()])
|
||||
license_end_date = DateField('License End Date', id='form-control datepicker', validators=[Optional()])
|
||||
allowed_monthly_interactions = IntegerField('Allowed Monthly Interactions', validators=[NumberRange(min=0)])
|
||||
# Embedding variables
|
||||
html_tags = StringField('HTML Tags', validators=[DataRequired()],
|
||||
default='p, h1, h2, h3, h4, h5, h6, li')
|
||||
@@ -57,6 +58,8 @@ class TenantForm(FlaskForm):
|
||||
# initialise language fields
|
||||
self.default_language.choices = [(lang, lang.lower()) for lang in current_app.config['SUPPORTED_LANGUAGES']]
|
||||
self.allowed_languages.choices = [(lang, lang.lower()) for lang in current_app.config['SUPPORTED_LANGUAGES']]
|
||||
# initialise currency field
|
||||
self.currency.choices = [(curr, curr) for curr in current_app.config['SUPPORTED_CURRENCIES']]
|
||||
# initialise timezone
|
||||
self.timezone.choices = [(tz, tz) for tz in pytz.all_timezones]
|
||||
# initialise LLM fields
|
||||
@@ -65,6 +68,7 @@ class TenantForm(FlaskForm):
|
||||
# Initialize fallback algorithms
|
||||
self.fallback_algorithms.choices = \
|
||||
[(algorithm, algorithm.lower()) for algorithm in current_app.config['FALLBACK_ALGORITHMS']]
|
||||
self.type.choices = [(t, t) for t in current_app.config['TENANT_TYPES']]
|
||||
|
||||
|
||||
class BaseUserForm(FlaskForm):
|
||||
@@ -107,4 +111,14 @@ class TenantDomainForm(FlaskForm):
|
||||
submit = SubmitField('Add Domain')
|
||||
|
||||
|
||||
class TenantSelectionForm(FlaskForm):
    """Filter form for the tenant selection list (type filter plus name search)."""
    types = SelectMultipleField('Tenant Types', choices=[], validators=[Optional()])
    search = StringField('Search', validators=[Optional()])
    submit = SubmitField('Filter')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Choices come from app config, so they are bound per instantiation.
        self.types.choices = [(tenant_type, tenant_type)
                              for tenant_type in current_app.config['TENANT_TYPES']]
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ import ast
|
||||
from common.models.user import User, Tenant, Role, TenantDomain
|
||||
from common.extensions import db, security, minio_client, simple_encryption
|
||||
from common.utils.security_utils import send_confirmation_email, send_reset_email
|
||||
from .user_forms import TenantForm, CreateUserForm, EditUserForm, TenantDomainForm
|
||||
from .user_forms import TenantForm, CreateUserForm, EditUserForm, TenantDomainForm, TenantSelectionForm
|
||||
from common.utils.database import Database
|
||||
from common.utils.view_assistants import prepare_table_for_macro, form_validation_failed
|
||||
from common.utils.simple_encryption import generate_api_key
|
||||
@@ -47,18 +47,6 @@ def tenant():
|
||||
# Handle the required attributes
|
||||
new_tenant = Tenant()
|
||||
form.populate_obj(new_tenant)
|
||||
# new_tenant = Tenant(name=form.name.data,
|
||||
# website=form.website.data,
|
||||
# default_language=form.default_language.data,
|
||||
# allowed_languages=form.allowed_languages.data,
|
||||
# timezone=form.timezone.data,
|
||||
# embedding_model=form.embedding_model.data,
|
||||
# llm_model=form.llm_model.data,
|
||||
# license_start_date=form.license_start_date.data,
|
||||
# license_end_date=form.license_end_date.data,
|
||||
# allowed_monthly_interactions=form.allowed_monthly_interactions.data,
|
||||
# embed_tuning=form.embed_tuning.data,
|
||||
# rag_tuning=form.rag_tuning.data)
|
||||
|
||||
# Handle Embedding Variables
|
||||
new_tenant.html_tags = [tag.strip() for tag in form.html_tags.data.split(',')] if form.html_tags.data else []
|
||||
@@ -87,7 +75,7 @@ def tenant():
|
||||
db.session.commit()
|
||||
except SQLAlchemyError as e:
|
||||
current_app.logger.error(f'Failed to add tenant to database. Error: {str(e)}')
|
||||
flash(f'Failed to add tenant to database. Error: {str(e)}')
|
||||
flash(f'Failed to add tenant to database. Error: {str(e)}', 'danger')
|
||||
return render_template('user/tenant.html', form=form)
|
||||
|
||||
current_app.logger.info(f"Successfully created tenant {new_tenant.id} in Database")
|
||||
@@ -129,6 +117,7 @@ def edit_tenant(tenant_id):
|
||||
form.html_excluded_classes.data = ', '.join(tenant.html_excluded_classes)
|
||||
|
||||
if form.validate_on_submit():
|
||||
current_app.logger.debug(f'Updating tenant {tenant_id}')
|
||||
# Populate the tenant with form data
|
||||
form.populate_obj(tenant)
|
||||
# Then handle the special fields manually
|
||||
@@ -148,9 +137,10 @@ def edit_tenant(tenant_id):
|
||||
session['tenant'] = tenant.to_dict()
|
||||
# return redirect(url_for(f"user/tenant/tenant_id"))
|
||||
else:
|
||||
current_app.logger.debug(f'Tenant update failed with errors: {form.errors}')
|
||||
form_validation_failed(request, form)
|
||||
|
||||
return render_template('user/edit_tenant.html', form=form, tenant_id=tenant_id)
|
||||
return render_template('user/tenant.html', form=form, tenant_id=tenant_id)
|
||||
|
||||
|
||||
@user_bp.route('/user', methods=['GET', 'POST'])
|
||||
@@ -245,20 +235,29 @@ def edit_user(user_id):
|
||||
return render_template('user/edit_user.html', form=form, user_id=user_id)
|
||||
|
||||
|
||||
@user_bp.route('/select_tenant')
|
||||
@user_bp.route('/select_tenant', methods=['GET', 'POST'])
|
||||
@roles_required('Super User')
|
||||
def select_tenant():
|
||||
filter_form = TenantSelectionForm(request.form)
|
||||
page = request.args.get('page', 1, type=int)
|
||||
per_page = request.args.get('per_page', 10, type=int)
|
||||
|
||||
query = Tenant.query.order_by(Tenant.name) # Fetch all tenants from the database
|
||||
query = Tenant.query
|
||||
|
||||
pagination = query.paginate(page=page, per_page=per_page)
|
||||
if filter_form.validate_on_submit():
|
||||
if filter_form.types.data:
|
||||
query = query.filter(Tenant.type.in_(filter_form.types.data))
|
||||
if filter_form.search.data:
|
||||
search = f"%{filter_form.search.data}%"
|
||||
query = query.filter(Tenant.name.ilike(search))
|
||||
|
||||
query = query.order_by(Tenant.name)
|
||||
pagination = query.paginate(page=page, per_page=per_page, error_out=False)
|
||||
tenants = pagination.items
|
||||
|
||||
rows = prepare_table_for_macro(tenants, [('id', ''), ('name', ''), ('website', '')])
|
||||
rows = prepare_table_for_macro(tenants, [('id', ''), ('name', ''), ('website', ''), ('type', '')])
|
||||
|
||||
return render_template('user/select_tenant.html', rows=rows, pagination=pagination)
|
||||
return render_template('user/select_tenant.html', rows=rows, pagination=pagination, filter_form=filter_form)
|
||||
|
||||
|
||||
@user_bp.route('/handle_tenant_selection', methods=['POST'])
|
||||
|
||||
44
eveai_beat/__init__.py
Normal file
44
eveai_beat/__init__.py
Normal file
@@ -0,0 +1,44 @@
|
||||
import logging
|
||||
import logging.config
|
||||
from flask import Flask
|
||||
import os
|
||||
|
||||
from common.utils.celery_utils import make_celery, init_celery
|
||||
from config.logging_config import LOGGING
|
||||
from config.config import get_config
|
||||
|
||||
|
||||
def create_app(config_file=None):
    """Build the Flask app and the Celery instance for the beat scheduler.

    Returns:
        tuple: (flask_app, celery_app) with the beat schedule installed.
    """
    app = Flask(__name__)

    # Anything other than 'production' falls back to the dev configuration.
    environment = os.getenv('FLASK_ENV', 'development')
    config_name = 'prod' if environment == 'production' else 'dev'
    app.config.from_object(get_config(config_name))

    logging.config.dictConfig(LOGGING)

    register_extensions(app)

    celery = make_celery(app.name, app.config)
    init_celery(celery, app, is_beat=True)

    # Imported late so the schedule module is loaded after configuration.
    from . import schedule
    celery.conf.beat_schedule = schedule.beat_schedule

    app.logger.info("EveAI Beat Scheduler Started Successfully")
    app.logger.info("-------------------------------------------------------------------------------------------------")

    return app, celery
|
||||
|
||||
|
||||
def register_extensions(app):
    # No Flask extensions are needed for the beat scheduler (yet); kept as a
    # hook for symmetry with the other service factories.
    pass
|
||||
|
||||
|
||||
app, celery = create_app()
|
||||
17
eveai_beat/schedule.py
Normal file
17
eveai_beat/schedule.py
Normal file
@@ -0,0 +1,17 @@
|
||||
from celery.schedules import crontab

# Define the Celery beat schedule here.
# Each entry maps a schedule name to a task name, a crontab schedule, task
# arguments, and routing options.
beat_schedule = {
    'update-tenant-usages-every-hour': {
        'task': 'update_usages',
        'schedule': crontab(minute='0'),  # Runs every hour
        'args': (),
        # Route to the entitlements worker queue.
        'options': {'queue': 'entitlements'}
    },
    # 'send-invoices-every-month': {
    #     'task': 'send_invoices',
    #     'schedule': crontab(day_of_month=1, hour=0, minute=0),  # Runs on the 1st of every month
    #     'args': ()
    # },
    # Add more schedules as needed
}
|
||||
@@ -3,7 +3,7 @@ import logging.config
|
||||
from flask import Flask, jsonify
|
||||
import os
|
||||
|
||||
from common.extensions import db, socketio, jwt, cors, session, simple_encryption
|
||||
from common.extensions import db, socketio, jwt, cors, session, simple_encryption, metrics
|
||||
from config.logging_config import LOGGING
|
||||
from eveai_chat.socket_handlers import chat_handler
|
||||
from common.utils.cors_utils import create_cors_after_request
|
||||
@@ -32,17 +32,6 @@ def create_app(config_file=None):
|
||||
app.celery = make_celery(app.name, app.config)
|
||||
init_celery(app.celery, app)
|
||||
|
||||
# Register Blueprints
|
||||
# register_blueprints(app)
|
||||
|
||||
@app.route('/ping')
|
||||
def ping():
|
||||
return 'pong'
|
||||
|
||||
@app.route('/health', methods=['GET'])
|
||||
def health():
|
||||
return jsonify({'status': 'ok'}), 200
|
||||
|
||||
app.logger.info("EveAI Chat Server Started Successfully")
|
||||
app.logger.info("-------------------------------------------------------------------------------------------------")
|
||||
return app
|
||||
@@ -61,8 +50,8 @@ def register_extensions(app):
|
||||
ping_interval=app.config.get('SOCKETIO_PING_INTERVAL'),
|
||||
)
|
||||
jwt.init_app(app)
|
||||
# kms_client.init_app(app)
|
||||
simple_encryption.init_app(app)
|
||||
metrics.init_app(app)
|
||||
|
||||
# Cors setup
|
||||
cors.init_app(app, resources={r"/chat/*": {"origins": "*"}})
|
||||
@@ -72,5 +61,5 @@ def register_extensions(app):
|
||||
|
||||
|
||||
def register_blueprints(app):
|
||||
from .views.chat_views import chat_bp
|
||||
app.register_blueprint(chat_bp)
|
||||
from views.healthz_views import healthz_bp
|
||||
app.register_blueprint(healthz_bp)
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
import uuid
|
||||
from functools import wraps
|
||||
|
||||
from flask_jwt_extended import create_access_token, get_jwt_identity, verify_jwt_in_request, decode_token
|
||||
from flask_socketio import emit, disconnect, join_room, leave_room
|
||||
from flask import current_app, request, session
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
from datetime import datetime, timedelta
|
||||
from prometheus_client import Counter, Histogram
|
||||
from time import time
|
||||
|
||||
from common.extensions import socketio, db, simple_encryption
|
||||
from common.models.user import Tenant
|
||||
@@ -12,8 +15,27 @@ from common.models.interaction import Interaction
|
||||
from common.utils.celery_utils import current_celery
|
||||
from common.utils.database import Database
|
||||
|
||||
# Define custom metrics
|
||||
socketio_message_counter = Counter('socketio_message_count', 'Count of SocketIO messages', ['event_type'])
|
||||
socketio_message_latency = Histogram('socketio_message_latency_seconds', 'Latency of SocketIO message processing', ['event_type'])
|
||||
|
||||
|
||||
# Decorator to measure SocketIO events
|
||||
def track_socketio_event(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
event_type = func.__name__
|
||||
socketio_message_counter.labels(event_type=event_type).inc()
|
||||
start_time = time()
|
||||
result = func(*args, **kwargs)
|
||||
latency = time() - start_time
|
||||
socketio_message_latency.labels(event_type=event_type).observe(latency)
|
||||
return result
|
||||
return wrapper
|
||||
|
||||
|
||||
@socketio.on('connect')
|
||||
@track_socketio_event
|
||||
def handle_connect():
|
||||
try:
|
||||
current_app.logger.debug(f'SocketIO: Connection handling started using {request.args}')
|
||||
@@ -58,6 +80,7 @@ def handle_connect():
|
||||
|
||||
|
||||
@socketio.on('disconnect')
|
||||
@track_socketio_event
|
||||
def handle_disconnect():
|
||||
room = session.get('room')
|
||||
if room:
|
||||
@@ -86,14 +109,16 @@ def handle_message(data):
|
||||
room = session.get('room')
|
||||
|
||||
# Offload actual processing of question
|
||||
task = current_celery.send_task('ask_question', queue='llm_interactions', args=[
|
||||
current_tenant_id,
|
||||
data['message'],
|
||||
data['language'],
|
||||
session['session_id'],
|
||||
data['timezone'],
|
||||
room
|
||||
])
|
||||
task = current_celery.send_task('ask_question',
|
||||
queue='llm_interactions',
|
||||
args=[
|
||||
current_tenant_id,
|
||||
data['message'],
|
||||
data['language'],
|
||||
session['session_id'],
|
||||
data['timezone'],
|
||||
room
|
||||
])
|
||||
current_app.logger.debug(f'SocketIO: Message offloading for tenant {current_tenant_id}, '
|
||||
f'Question: {task.id}')
|
||||
response = {
|
||||
|
||||
@@ -1,77 +0,0 @@
|
||||
from datetime import datetime as dt, timezone as tz
|
||||
from flask import request, redirect, url_for, render_template, Blueprint, session, current_app, jsonify
|
||||
from flask_security import hash_password, roles_required, roles_accepted
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
from flask_jwt_extended import create_access_token, jwt_required, get_jwt_identity
|
||||
from flask_socketio import emit, join_room, leave_room
|
||||
import ast
|
||||
|
||||
|
||||
from common.models.user import User, Tenant
|
||||
from common.models.interaction import ChatSession, Interaction, InteractionEmbedding
|
||||
from common.models.document import Embedding
|
||||
from common.extensions import db, socketio, kms_client
|
||||
from common.utils.database import Database
|
||||
|
||||
chat_bp = Blueprint('chat_bp', __name__, url_prefix='/chat')
|
||||
|
||||
|
||||
@chat_bp.route('/register_client', methods=['POST'])
|
||||
def register_client():
|
||||
tenant_id = request.json.get('tenant_id')
|
||||
api_key = request.json.get('api_key')
|
||||
|
||||
# Validate tenant_id and api_key here (e.g., check against the database)
|
||||
if validate_tenant(tenant_id, api_key):
|
||||
access_token = create_access_token(identity={'tenant_id': tenant_id, 'api_key': api_key})
|
||||
current_app.logger.debug(f'Tenant Registration: Tenant {tenant_id} registered successfully')
|
||||
return jsonify({'token': access_token}), 200
|
||||
else:
|
||||
current_app.logger.debug(f'Tenant Registration: Invalid tenant_id ({tenant_id}) or api_key ({api_key})')
|
||||
return jsonify({'message': 'Invalid credentials'}), 401
|
||||
|
||||
|
||||
@socketio.on('connect', namespace='/chat')
|
||||
@jwt_required()
|
||||
def handle_connect():
|
||||
current_tenant = get_jwt_identity()
|
||||
current_app.logger.debug(f'Tenant {current_tenant["tenant_id"]} connected')
|
||||
|
||||
|
||||
@socketio.on('message', namespace='/chat')
|
||||
@jwt_required()
|
||||
def handle_message(data):
|
||||
current_tenant = get_jwt_identity()
|
||||
current_app.logger.debug(f'Tenant {current_tenant["tenant_id"]} sent a message: {data}')
|
||||
# Store interaction in the database
|
||||
emit('response', {'data': 'Message received'}, broadcast=True)
|
||||
|
||||
|
||||
def validate_tenant(tenant_id, api_key):
|
||||
tenant = Tenant.query.get_or_404(tenant_id)
|
||||
encrypted_api_key = ast.literal_eval(tenant.encrypted_chat_api_key)
|
||||
|
||||
decrypted_api_key = kms_client.decrypt_api_key(encrypted_api_key)
|
||||
|
||||
return decrypted_api_key == api_key
|
||||
|
||||
|
||||
|
||||
# @chat_bp.route('/', methods=['GET', 'POST'])
|
||||
# def chat():
|
||||
# return render_template('chat.html')
|
||||
#
|
||||
#
|
||||
# @chat.record_once
|
||||
# def on_register(state):
|
||||
# # TODO: write initialisation code when the blueprint is registered (only once)
|
||||
# # socketio.init_app(state.app)
|
||||
# pass
|
||||
#
|
||||
#
|
||||
# @socketio.on('message', namespace='/chat')
|
||||
# def handle_message(message):
|
||||
# # TODO: write message handling code to actually realise chat
|
||||
# # print('Received message:', message)
|
||||
# # socketio.emit('response', {'data': message}, namespace='/chat')
|
||||
# pass
|
||||
70
eveai_chat/views/healthz_views.py
Normal file
70
eveai_chat/views/healthz_views.py
Normal file
@@ -0,0 +1,70 @@
|
||||
from flask import Blueprint, current_app, request
|
||||
from flask_healthz import HealthError
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
from celery.exceptions import TimeoutError as CeleryTimeoutError
|
||||
from common.extensions import db, metrics, minio_client
|
||||
from common.utils.celery_utils import current_celery
|
||||
from eveai_chat.socket_handlers.chat_handler import socketio_message_counter, socketio_message_latency
|
||||
|
||||
healthz_bp = Blueprint('healthz', __name__, url_prefix='/_healthz')
|
||||
|
||||
|
||||
def liveness():
|
||||
try:
|
||||
# Basic check to see if the app is running
|
||||
return True
|
||||
except Exception:
|
||||
raise HealthError("Liveness check failed")
|
||||
|
||||
|
||||
def readiness():
|
||||
checks = {
|
||||
"database": check_database(),
|
||||
"celery": check_celery(),
|
||||
# Add more checks as needed
|
||||
}
|
||||
|
||||
if not all(checks.values()):
|
||||
raise HealthError("Readiness check failed")
|
||||
|
||||
|
||||
def check_database():
|
||||
try:
|
||||
# Perform a simple database query
|
||||
db.session.execute("SELECT 1")
|
||||
return True
|
||||
except SQLAlchemyError:
|
||||
current_app.logger.error("Database check failed", exc_info=True)
|
||||
return False
|
||||
|
||||
|
||||
def check_celery():
|
||||
try:
|
||||
# Send a simple task to Celery
|
||||
result = current_celery.send_task('ping', queue='llm_interactions')
|
||||
response = result.get(timeout=10) # Wait for up to 10 seconds for a response
|
||||
return response == 'pong'
|
||||
except CeleryTimeoutError:
|
||||
current_app.logger.error("Celery check timed out", exc_info=True)
|
||||
return False
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"Celery check failed: {str(e)}", exc_info=True)
|
||||
return False
|
||||
|
||||
|
||||
@healthz_bp.route('/metrics')
|
||||
@metrics.do_not_track()
|
||||
def prometheus_metrics():
|
||||
return metrics.generate_latest()
|
||||
|
||||
|
||||
def init_healtz(app):
|
||||
app.config.update(
|
||||
HEALTHZ={
|
||||
"live": "healthz_views.liveness",
|
||||
"ready": "healthz_views.readiness",
|
||||
}
|
||||
)
|
||||
# Register SocketIO metrics with Prometheus
|
||||
metrics.register(socketio_message_counter)
|
||||
metrics.register(socketio_message_latency)
|
||||
@@ -22,12 +22,23 @@ from common.models.interaction import ChatSession, Interaction, InteractionEmbed
|
||||
from common.extensions import db
|
||||
from common.utils.celery_utils import current_celery
|
||||
from common.utils.model_utils import select_model_variables, create_language_template, replace_variable_in_template
|
||||
from common.langchain.EveAIRetriever import EveAIRetriever
|
||||
from common.langchain.EveAIHistoryRetriever import EveAIHistoryRetriever
|
||||
from common.langchain.eveai_retriever import EveAIRetriever
|
||||
from common.langchain.eveai_history_retriever import EveAIHistoryRetriever
|
||||
from common.utils.business_event import BusinessEvent
|
||||
from common.utils.business_event_context import current_event
|
||||
|
||||
|
||||
# Healthcheck task
|
||||
@current_celery.task(name='ping', queue='llm_interactions')
|
||||
def ping():
|
||||
return 'pong'
|
||||
|
||||
|
||||
def detail_question(question, language, model_variables, session_id):
|
||||
retriever = EveAIHistoryRetriever(model_variables, session_id)
|
||||
current_app.logger.debug(f'Detail question: {question}')
|
||||
current_app.logger.debug(f'model_varialbes: {model_variables}')
|
||||
current_app.logger.debug(f'session_id: {session_id}')
|
||||
retriever = EveAIHistoryRetriever(model_variables=model_variables, session_id=session_id)
|
||||
llm = model_variables['llm']
|
||||
template = model_variables['history_template']
|
||||
language_template = create_language_template(template, language)
|
||||
@@ -56,53 +67,56 @@ def ask_question(tenant_id, question, language, session_id, user_timezone, room)
|
||||
'interaction_id': 'interaction_id_value'
|
||||
}
|
||||
"""
|
||||
current_app.logger.info(f'ask_question: Received question for tenant {tenant_id}: {question}. Processing...')
|
||||
with BusinessEvent("Ask Question", tenant_id=tenant_id, chat_session_id=session_id):
|
||||
current_app.logger.info(f'ask_question: Received question for tenant {tenant_id}: {question}. Processing...')
|
||||
|
||||
try:
|
||||
# Retrieve the tenant
|
||||
tenant = Tenant.query.get(tenant_id)
|
||||
if not tenant:
|
||||
raise Exception(f'Tenant {tenant_id} not found.')
|
||||
try:
|
||||
# Retrieve the tenant
|
||||
tenant = Tenant.query.get(tenant_id)
|
||||
if not tenant:
|
||||
raise Exception(f'Tenant {tenant_id} not found.')
|
||||
|
||||
# Ensure we are working in the correct database schema
|
||||
Database(tenant_id).switch_schema()
|
||||
# Ensure we are working in the correct database schema
|
||||
Database(tenant_id).switch_schema()
|
||||
|
||||
# Ensure we have a session to story history
|
||||
chat_session = ChatSession.query.filter_by(session_id=session_id).first()
|
||||
if not chat_session:
|
||||
try:
|
||||
chat_session = ChatSession()
|
||||
chat_session.session_id = session_id
|
||||
chat_session.session_start = dt.now(tz.utc)
|
||||
chat_session.timezone = user_timezone
|
||||
db.session.add(chat_session)
|
||||
db.session.commit()
|
||||
except SQLAlchemyError as e:
|
||||
current_app.logger.error(f'ask_question: Error initializing chat session in database: {e}')
|
||||
raise
|
||||
# Ensure we have a session to story history
|
||||
chat_session = ChatSession.query.filter_by(session_id=session_id).first()
|
||||
if not chat_session:
|
||||
try:
|
||||
chat_session = ChatSession()
|
||||
chat_session.session_id = session_id
|
||||
chat_session.session_start = dt.now(tz.utc)
|
||||
chat_session.timezone = user_timezone
|
||||
db.session.add(chat_session)
|
||||
db.session.commit()
|
||||
except SQLAlchemyError as e:
|
||||
current_app.logger.error(f'ask_question: Error initializing chat session in database: {e}')
|
||||
raise
|
||||
|
||||
if tenant.rag_tuning:
|
||||
current_app.rag_tuning_logger.debug(f'Received question for tenant {tenant_id}:\n{question}. Processing...')
|
||||
current_app.rag_tuning_logger.debug(f'Tenant Information: \n{tenant.to_dict()}')
|
||||
current_app.rag_tuning_logger.debug(f'===================================================================')
|
||||
current_app.rag_tuning_logger.debug(f'===================================================================')
|
||||
if tenant.rag_tuning:
|
||||
current_app.rag_tuning_logger.debug(f'Received question for tenant {tenant_id}:\n{question}. Processing...')
|
||||
current_app.rag_tuning_logger.debug(f'Tenant Information: \n{tenant.to_dict()}')
|
||||
current_app.rag_tuning_logger.debug(f'===================================================================')
|
||||
current_app.rag_tuning_logger.debug(f'===================================================================')
|
||||
|
||||
result, interaction = answer_using_tenant_rag(question, language, tenant, chat_session)
|
||||
result['algorithm'] = current_app.config['INTERACTION_ALGORITHMS']['RAG_TENANT']['name']
|
||||
result['interaction_id'] = interaction.id
|
||||
result['room'] = room # Include the room in the result
|
||||
|
||||
if result['insufficient_info']:
|
||||
if 'LLM' in tenant.fallback_algorithms:
|
||||
result, interaction = answer_using_llm(question, language, tenant, chat_session)
|
||||
result['algorithm'] = current_app.config['INTERACTION_ALGORITHMS']['LLM']['name']
|
||||
with current_event.create_span("RAG Answer"):
|
||||
result, interaction = answer_using_tenant_rag(question, language, tenant, chat_session)
|
||||
result['algorithm'] = current_app.config['INTERACTION_ALGORITHMS']['RAG_TENANT']['name']
|
||||
result['interaction_id'] = interaction.id
|
||||
result['room'] = room # Include the room in the result
|
||||
|
||||
return result
|
||||
except Exception as e:
|
||||
current_app.logger.error(f'ask_question: Error processing question: {e}')
|
||||
raise
|
||||
if result['insufficient_info']:
|
||||
if 'LLM' in tenant.fallback_algorithms:
|
||||
with current_event.create_span("Fallback Algorithm LLM"):
|
||||
result, interaction = answer_using_llm(question, language, tenant, chat_session)
|
||||
result['algorithm'] = current_app.config['INTERACTION_ALGORITHMS']['LLM']['name']
|
||||
result['interaction_id'] = interaction.id
|
||||
result['room'] = room # Include the room in the result
|
||||
|
||||
return result
|
||||
except Exception as e:
|
||||
current_app.logger.error(f'ask_question: Error processing question: {e}')
|
||||
raise
|
||||
|
||||
|
||||
def answer_using_tenant_rag(question, language, tenant, chat_session):
|
||||
@@ -122,92 +136,94 @@ def answer_using_tenant_rag(question, language, tenant, chat_session):
|
||||
# Langchain debugging if required
|
||||
# set_debug(True)
|
||||
|
||||
detailed_question = detail_question(question, language, model_variables, chat_session.session_id)
|
||||
current_app.logger.debug(f'Original question:\n {question}\n\nDetailed question: {detailed_question}')
|
||||
if tenant.rag_tuning:
|
||||
current_app.rag_tuning_logger.debug(f'Detailed Question for tenant {tenant.id}:\n{question}.')
|
||||
current_app.rag_tuning_logger.debug(f'-------------------------------------------------------------------')
|
||||
new_interaction.detailed_question = detailed_question
|
||||
new_interaction.detailed_question_at = dt.now(tz.utc)
|
||||
|
||||
retriever = EveAIRetriever(model_variables, tenant_info)
|
||||
llm = model_variables['llm']
|
||||
template = model_variables['rag_template']
|
||||
language_template = create_language_template(template, language)
|
||||
full_template = replace_variable_in_template(language_template, "{tenant_context}", model_variables['rag_context'])
|
||||
rag_prompt = ChatPromptTemplate.from_template(full_template)
|
||||
setup_and_retrieval = RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
|
||||
if tenant.rag_tuning:
|
||||
current_app.rag_tuning_logger.debug(f'Full prompt for tenant {tenant.id}:\n{full_template}.')
|
||||
current_app.rag_tuning_logger.debug(f'-------------------------------------------------------------------')
|
||||
|
||||
new_interaction_embeddings = []
|
||||
if not model_variables['cited_answer_cls']: # The model doesn't support structured feedback
|
||||
output_parser = StrOutputParser()
|
||||
|
||||
chain = setup_and_retrieval | rag_prompt | llm | output_parser
|
||||
|
||||
# Invoke the chain with the actual question
|
||||
answer = chain.invoke(detailed_question)
|
||||
new_interaction.answer = answer
|
||||
result = {
|
||||
'answer': answer,
|
||||
'citations': [],
|
||||
'insufficient_info': False
|
||||
}
|
||||
|
||||
else: # The model supports structured feedback
|
||||
structured_llm = llm.with_structured_output(model_variables['cited_answer_cls'])
|
||||
|
||||
chain = setup_and_retrieval | rag_prompt | structured_llm
|
||||
|
||||
result = chain.invoke(detailed_question).dict()
|
||||
current_app.logger.debug(f'ask_question: result answer: {result['answer']}')
|
||||
current_app.logger.debug(f'ask_question: result citations: {result["citations"]}')
|
||||
current_app.logger.debug(f'ask_question: insufficient information: {result["insufficient_info"]}')
|
||||
with current_event.create_span("Detail Question"):
|
||||
detailed_question = detail_question(question, language, model_variables, chat_session.session_id)
|
||||
current_app.logger.debug(f'Original question:\n {question}\n\nDetailed question: {detailed_question}')
|
||||
if tenant.rag_tuning:
|
||||
current_app.rag_tuning_logger.debug(f'ask_question: result answer: {result['answer']}')
|
||||
current_app.rag_tuning_logger.debug(f'ask_question: result citations: {result["citations"]}')
|
||||
current_app.rag_tuning_logger.debug(f'ask_question: insufficient information: {result["insufficient_info"]}')
|
||||
current_app.rag_tuning_logger.debug(f'Detailed Question for tenant {tenant.id}:\n{question}.')
|
||||
current_app.rag_tuning_logger.debug(f'-------------------------------------------------------------------')
|
||||
new_interaction.answer = result['answer']
|
||||
new_interaction.detailed_question = detailed_question
|
||||
new_interaction.detailed_question_at = dt.now(tz.utc)
|
||||
|
||||
# Filter out the existing Embedding IDs
|
||||
given_embedding_ids = [int(emb_id) for emb_id in result['citations']]
|
||||
embeddings = (
|
||||
db.session.query(Embedding)
|
||||
.filter(Embedding.id.in_(given_embedding_ids))
|
||||
.all()
|
||||
)
|
||||
existing_embedding_ids = [emb.id for emb in embeddings]
|
||||
urls = list(set(emb.document_version.url for emb in embeddings))
|
||||
with current_event.create_span("Generate Answer using RAG"):
|
||||
retriever = EveAIRetriever(model_variables, tenant_info)
|
||||
llm = model_variables['llm']
|
||||
template = model_variables['rag_template']
|
||||
language_template = create_language_template(template, language)
|
||||
full_template = replace_variable_in_template(language_template, "{tenant_context}", model_variables['rag_context'])
|
||||
rag_prompt = ChatPromptTemplate.from_template(full_template)
|
||||
setup_and_retrieval = RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
|
||||
if tenant.rag_tuning:
|
||||
current_app.rag_tuning_logger.debug(f'Referenced documents for answer for tenant {tenant.id}:\n')
|
||||
current_app.rag_tuning_logger.debug(f'{urls}')
|
||||
current_app.rag_tuning_logger.debug(f'Full prompt for tenant {tenant.id}:\n{full_template}.')
|
||||
current_app.rag_tuning_logger.debug(f'-------------------------------------------------------------------')
|
||||
|
||||
for emb_id in existing_embedding_ids:
|
||||
new_interaction_embedding = InteractionEmbedding(embedding_id=emb_id)
|
||||
new_interaction_embedding.interaction = new_interaction
|
||||
new_interaction_embeddings.append(new_interaction_embedding)
|
||||
new_interaction_embeddings = []
|
||||
if not model_variables['cited_answer_cls']: # The model doesn't support structured feedback
|
||||
output_parser = StrOutputParser()
|
||||
|
||||
result['citations'] = urls
|
||||
chain = setup_and_retrieval | rag_prompt | llm | output_parser
|
||||
|
||||
# Disable langchain debugging if set above.
|
||||
# set_debug(False)
|
||||
# Invoke the chain with the actual question
|
||||
answer = chain.invoke(detailed_question)
|
||||
new_interaction.answer = answer
|
||||
result = {
|
||||
'answer': answer,
|
||||
'citations': [],
|
||||
'insufficient_info': False
|
||||
}
|
||||
|
||||
new_interaction.answer_at = dt.now(tz.utc)
|
||||
chat_session.session_end = dt.now(tz.utc)
|
||||
else: # The model supports structured feedback
|
||||
structured_llm = llm.with_structured_output(model_variables['cited_answer_cls'])
|
||||
|
||||
try:
|
||||
db.session.add(chat_session)
|
||||
db.session.add(new_interaction)
|
||||
db.session.add_all(new_interaction_embeddings)
|
||||
db.session.commit()
|
||||
return result, new_interaction
|
||||
except SQLAlchemyError as e:
|
||||
current_app.logger.error(f'ask_question: Error saving interaction to database: {e}')
|
||||
raise
|
||||
chain = setup_and_retrieval | rag_prompt | structured_llm
|
||||
|
||||
result = chain.invoke(detailed_question).dict()
|
||||
current_app.logger.debug(f'ask_question: result answer: {result['answer']}')
|
||||
current_app.logger.debug(f'ask_question: result citations: {result["citations"]}')
|
||||
current_app.logger.debug(f'ask_question: insufficient information: {result["insufficient_info"]}')
|
||||
if tenant.rag_tuning:
|
||||
current_app.rag_tuning_logger.debug(f'ask_question: result answer: {result['answer']}')
|
||||
current_app.rag_tuning_logger.debug(f'ask_question: result citations: {result["citations"]}')
|
||||
current_app.rag_tuning_logger.debug(f'ask_question: insufficient information: {result["insufficient_info"]}')
|
||||
current_app.rag_tuning_logger.debug(f'-------------------------------------------------------------------')
|
||||
new_interaction.answer = result['answer']
|
||||
|
||||
# Filter out the existing Embedding IDs
|
||||
given_embedding_ids = [int(emb_id) for emb_id in result['citations']]
|
||||
embeddings = (
|
||||
db.session.query(Embedding)
|
||||
.filter(Embedding.id.in_(given_embedding_ids))
|
||||
.all()
|
||||
)
|
||||
existing_embedding_ids = [emb.id for emb in embeddings]
|
||||
urls = list(set(emb.document_version.url for emb in embeddings))
|
||||
if tenant.rag_tuning:
|
||||
current_app.rag_tuning_logger.debug(f'Referenced documents for answer for tenant {tenant.id}:\n')
|
||||
current_app.rag_tuning_logger.debug(f'{urls}')
|
||||
current_app.rag_tuning_logger.debug(f'-------------------------------------------------------------------')
|
||||
|
||||
for emb_id in existing_embedding_ids:
|
||||
new_interaction_embedding = InteractionEmbedding(embedding_id=emb_id)
|
||||
new_interaction_embedding.interaction = new_interaction
|
||||
new_interaction_embeddings.append(new_interaction_embedding)
|
||||
|
||||
result['citations'] = urls
|
||||
|
||||
# Disable langchain debugging if set above.
|
||||
# set_debug(False)
|
||||
|
||||
new_interaction.answer_at = dt.now(tz.utc)
|
||||
chat_session.session_end = dt.now(tz.utc)
|
||||
|
||||
try:
|
||||
db.session.add(chat_session)
|
||||
db.session.add(new_interaction)
|
||||
db.session.add_all(new_interaction_embeddings)
|
||||
db.session.commit()
|
||||
return result, new_interaction
|
||||
except SQLAlchemyError as e:
|
||||
current_app.logger.error(f'ask_question: Error saving interaction to database: {e}')
|
||||
raise
|
||||
|
||||
|
||||
def answer_using_llm(question, language, tenant, chat_session):
|
||||
@@ -227,47 +243,49 @@ def answer_using_llm(question, language, tenant, chat_session):
|
||||
# Langchain debugging if required
|
||||
# set_debug(True)
|
||||
|
||||
detailed_question = detail_question(question, language, model_variables, chat_session.session_id)
|
||||
current_app.logger.debug(f'Original question:\n {question}\n\nDetailed question: {detailed_question}')
|
||||
new_interaction.detailed_question = detailed_question
|
||||
new_interaction.detailed_question_at = dt.now(tz.utc)
|
||||
with current_event.create_span("Detail Question"):
|
||||
detailed_question = detail_question(question, language, model_variables, chat_session.session_id)
|
||||
current_app.logger.debug(f'Original question:\n {question}\n\nDetailed question: {detailed_question}')
|
||||
new_interaction.detailed_question = detailed_question
|
||||
new_interaction.detailed_question_at = dt.now(tz.utc)
|
||||
|
||||
retriever = EveAIRetriever(model_variables, tenant_info)
|
||||
llm = model_variables['llm_no_rag']
|
||||
template = model_variables['encyclopedia_template']
|
||||
language_template = create_language_template(template, language)
|
||||
rag_prompt = ChatPromptTemplate.from_template(language_template)
|
||||
setup = RunnablePassthrough()
|
||||
output_parser = StrOutputParser()
|
||||
with current_event.create_span("Detail Answer using LLM"):
|
||||
retriever = EveAIRetriever(model_variables, tenant_info)
|
||||
llm = model_variables['llm_no_rag']
|
||||
template = model_variables['encyclopedia_template']
|
||||
language_template = create_language_template(template, language)
|
||||
rag_prompt = ChatPromptTemplate.from_template(language_template)
|
||||
setup = RunnablePassthrough()
|
||||
output_parser = StrOutputParser()
|
||||
|
||||
new_interaction_embeddings = []
|
||||
new_interaction_embeddings = []
|
||||
|
||||
chain = setup | rag_prompt | llm | output_parser
|
||||
input_question = {"question": detailed_question}
|
||||
chain = setup | rag_prompt | llm | output_parser
|
||||
input_question = {"question": detailed_question}
|
||||
|
||||
# Invoke the chain with the actual question
|
||||
answer = chain.invoke(input_question)
|
||||
new_interaction.answer = answer
|
||||
result = {
|
||||
'answer': answer,
|
||||
'citations': [],
|
||||
'insufficient_info': False
|
||||
}
|
||||
# Invoke the chain with the actual question
|
||||
answer = chain.invoke(input_question)
|
||||
new_interaction.answer = answer
|
||||
result = {
|
||||
'answer': answer,
|
||||
'citations': [],
|
||||
'insufficient_info': False
|
||||
}
|
||||
|
||||
# Disable langchain debugging if set above.
|
||||
# set_debug(False)
|
||||
# Disable langchain debugging if set above.
|
||||
# set_debug(False)
|
||||
|
||||
new_interaction.answer_at = dt.now(tz.utc)
|
||||
chat_session.session_end = dt.now(tz.utc)
|
||||
new_interaction.answer_at = dt.now(tz.utc)
|
||||
chat_session.session_end = dt.now(tz.utc)
|
||||
|
||||
try:
|
||||
db.session.add(chat_session)
|
||||
db.session.add(new_interaction)
|
||||
db.session.commit()
|
||||
return result, new_interaction
|
||||
except SQLAlchemyError as e:
|
||||
current_app.logger.error(f'ask_question: Error saving interaction to database: {e}')
|
||||
raise
|
||||
try:
|
||||
db.session.add(chat_session)
|
||||
db.session.add(new_interaction)
|
||||
db.session.commit()
|
||||
return result, new_interaction
|
||||
except SQLAlchemyError as e:
|
||||
current_app.logger.error(f'ask_question: Error saving interaction to database: {e}')
|
||||
raise
|
||||
|
||||
|
||||
def tasks_ping():
|
||||
|
||||
44
eveai_entitlements/__init__.py
Normal file
44
eveai_entitlements/__init__.py
Normal file
@@ -0,0 +1,44 @@
|
||||
import logging
|
||||
import logging.config
|
||||
from flask import Flask
|
||||
import os
|
||||
|
||||
from common.utils.celery_utils import make_celery, init_celery
|
||||
from common.extensions import db, minio_client
|
||||
from config.logging_config import LOGGING
|
||||
from config.config import get_config
|
||||
|
||||
|
||||
def create_app(config_file=None):
|
||||
app = Flask(__name__)
|
||||
|
||||
environment = os.getenv('FLASK_ENV', 'development')
|
||||
|
||||
match environment:
|
||||
case 'development':
|
||||
app.config.from_object(get_config('dev'))
|
||||
case 'production':
|
||||
app.config.from_object(get_config('prod'))
|
||||
case _:
|
||||
app.config.from_object(get_config('dev'))
|
||||
|
||||
logging.config.dictConfig(LOGGING)
|
||||
|
||||
register_extensions(app)
|
||||
|
||||
celery = make_celery(app.name, app.config)
|
||||
init_celery(celery, app)
|
||||
|
||||
from . import tasks
|
||||
|
||||
app.logger.info("EveAI Entitlements Server Started Successfully")
|
||||
app.logger.info("-------------------------------------------------------------------------------------------------")
|
||||
|
||||
return app, celery
|
||||
|
||||
|
||||
def register_extensions(app):
|
||||
db.init_app(app)
|
||||
|
||||
|
||||
app, celery = create_app()
|
||||
253
eveai_entitlements/tasks.py
Normal file
253
eveai_entitlements/tasks.py
Normal file
@@ -0,0 +1,253 @@
|
||||
import io
|
||||
import os
|
||||
from datetime import datetime as dt, timezone as tz, datetime
|
||||
|
||||
from celery import states
|
||||
from dateutil.relativedelta import relativedelta
|
||||
from flask import current_app
|
||||
from sqlalchemy import or_, and_, text
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
from common.extensions import db
|
||||
from common.models.user import Tenant
|
||||
from common.models.entitlements import BusinessEventLog, LicenseUsage, License
|
||||
from common.utils.celery_utils import current_celery
|
||||
from common.utils.eveai_exceptions import EveAINoLicenseForTenant, EveAIException
|
||||
from common.utils.database import Database
|
||||
|
||||
|
||||
# Healthcheck task
|
||||
@current_celery.task(name='ping', queue='entitlements')
|
||||
def ping():
|
||||
return 'pong'
|
||||
|
||||
|
||||
@current_celery.task(name='update_usages', queue='entitlements')
|
||||
def update_usages():
|
||||
current_timestamp = dt.now(tz.utc)
|
||||
tenant_ids = get_all_tenant_ids()
|
||||
|
||||
# List to collect all errors
|
||||
error_list = []
|
||||
|
||||
for tenant_id in tenant_ids:
|
||||
try:
|
||||
Database(tenant_id).switch_schema()
|
||||
check_and_create_license_usage_for_tenant(tenant_id)
|
||||
tenant = Tenant.query.get(tenant_id)
|
||||
if tenant.storage_dirty:
|
||||
recalculate_storage_for_tenant(tenant)
|
||||
logs = get_logs_for_processing(tenant_id, current_timestamp)
|
||||
if not logs:
|
||||
continue # If no logs to be processed, continu to the next tenant
|
||||
|
||||
# Get the min and max timestamp from the logs
|
||||
min_timestamp = min(log.timestamp for log in logs)
|
||||
max_timestamp = max(log.timestamp for log in logs)
|
||||
|
||||
# Retrieve relevant LicenseUsage records
|
||||
current_app.logger.debug(f"Searching relevant usages for tenant {tenant_id}")
|
||||
license_usages = get_relevant_license_usages(db.session, tenant_id, min_timestamp, max_timestamp)
|
||||
current_app.logger.debug(f"Found {license_usages}, end searching relevant usages for tenant {tenant_id}")
|
||||
|
||||
# Split logs based on LicenseUsage periods
|
||||
current_app.logger.debug(f"Splitting usages for tenant {tenant_id}")
|
||||
logs_by_usage = split_logs_by_license_usage(logs, license_usages)
|
||||
current_app.logger.debug(f"Found {logs_by_usage}, end splitting logs for tenant {tenant_id}")
|
||||
|
||||
# Now you can process logs for each LicenseUsage
|
||||
for license_usage_id, logs in logs_by_usage.items():
|
||||
current_app.logger.debug(f"Processing logs for usage id {license_usage_id} for tenant {tenant_id}")
|
||||
process_logs_for_license_usage(tenant_id, license_usage_id, logs)
|
||||
current_app.logger.debug(f"Finished processing logs for tenant {tenant_id}")
|
||||
except Exception as e:
|
||||
error = f"Usage Calculation error for Tenant {tenant_id}: {e}"
|
||||
error_list.append(error)
|
||||
current_app.logger.error(error)
|
||||
continue
|
||||
|
||||
if error_list:
|
||||
raise Exception('\n'.join(error_list))
|
||||
|
||||
return "Update Usages taks completed successfully"
|
||||
|
||||
|
||||
def get_all_tenant_ids():
|
||||
tenant_ids = db.session.query(Tenant.id).all()
|
||||
return [tenant_id[0] for tenant_id in tenant_ids] # Extract tenant_id from tuples
|
||||
|
||||
|
||||
def check_and_create_license_usage_for_tenant(tenant_id):
    """Ensure the tenant has a LicenseUsage record covering the current date.

    Looks for a LicenseUsage whose period brackets today (UTC); if none
    exists, derives a fresh monthly period from the tenant's active License
    and persists a new LicenseUsage row.

    Raises:
        EveAINoLicenseForTenant: if no License covers the current date.
        SQLAlchemyError: if persisting the new LicenseUsage fails (the
            session is rolled back before re-raising).
    """
    current_date = dt.now(tz.utc).date()
    # Any existing usage record whose period contains today means there is
    # nothing to create.
    license_usages = (db.session.query(LicenseUsage)
                      .filter_by(tenant_id=tenant_id)
                      .filter(and_(LicenseUsage.period_start_date <= current_date,
                                   LicenseUsage.period_end_date >= current_date))
                      .all())
    if not license_usages:
        # one_or_none(): assumes at most one License is active for a tenant at
        # a time — it raises if the query matches several rows.
        active_license = (db.session.query(License).filter_by(tenant_id=tenant_id)
                          .filter(and_(License.start_date <= current_date,
                                       License.end_date >= current_date))
                          .one_or_none())
        if not active_license:
            current_app.logger.error(f"No License defined for {tenant_id}. "
                                     f"Impossible to calculate license usage.")
            raise EveAINoLicenseForTenant(message=f"No License defined for {tenant_id}. "
                                                  f"Impossible to calculate license usage.")

        # Period boundaries are anchored on the license start date so monthly
        # periods line up with the license anniversary.
        start_date, end_date = calculate_valid_period(current_date, active_license.start_date)
        new_license_usage = LicenseUsage(period_start_date=start_date,
                                         period_end_date=end_date,
                                         license_id=active_license.id,
                                         tenant_id=tenant_id
                                         )
        try:
            db.session.add(new_license_usage)
            db.session.commit()
        except SQLAlchemyError as e:
            # Roll back so the session stays usable for the caller's loop.
            db.session.rollback()
            current_app.logger.error(f"Error trying to create new license usage for tenant {tenant_id}. "
                                     f"Error: {str(e)}")
            raise e
|
||||
|
||||
|
||||
def _add_months(day, months):
    """Return `day` shifted forward by `months` calendar months.

    The day-of-month is clamped to the last valid day of the target month
    (e.g. Jan 31 + 1 month -> Feb 28/29), matching the semantics of
    dateutil.relativedelta month addition.
    """
    import calendar
    total = day.month - 1 + months
    year, month = day.year + total // 12, total % 12 + 1
    last_day = calendar.monthrange(year, month)[1]
    return day.replace(year=year, month=month, day=min(day.day, last_day))


def calculate_valid_period(given_date, original_start_date):
    """Return (start_date, end_date) of the monthly period containing given_date.

    Periods are one-month windows anchored on original_start_date. Each
    boundary is computed directly from the anchor (anchor + k months) rather
    than by repeatedly adding one month to the previous boundary: the old
    iterative form drifted after a clamped short month (an anchor of Jan 31
    produced Feb 28, then Mar 28 forever instead of Mar 31). Consecutive
    periods never leave a gap: a period ends the day before the next anchored
    boundary.

    Raises:
        ValueError: if given_date precedes original_start_date.
    """
    from datetime import timedelta

    # Ensure both dates are of datetime.date type
    if isinstance(given_date, datetime):
        given_date = given_date.date()
    if isinstance(original_start_date, datetime):
        original_start_date = original_start_date.date()

    if given_date < original_start_date:
        raise ValueError("Given date does not fall within a valid period.")

    # Largest k with anchor + k months <= given_date. The month-difference
    # estimate can overshoot by one when day clamping pushes the boundary past
    # given_date, so correct downward once.
    k = (given_date.year - original_start_date.year) * 12 \
        + (given_date.month - original_start_date.month)
    start_date = _add_months(original_start_date, k)
    if start_date > given_date:
        k -= 1
        start_date = _add_months(original_start_date, k)

    # End the day before the next anchored boundary so periods tile the
    # calendar without gaps.
    end_date = _add_months(original_start_date, k + 1) - timedelta(days=1)

    return start_date, end_date
|
||||
|
||||
|
||||
def get_logs_for_processing(tenant_id, end_time_stamp):
    """Return the tenant's unprocessed business event logs up to end_time_stamp.

    A log counts as unprocessed while its license_usage_id is still NULL —
    processing stamps that column afterwards.
    """
    return (db.session.query(BusinessEventLog).filter(
        BusinessEventLog.tenant_id == tenant_id,
        # .is_(None) is the SQLAlchemy idiom for IS NULL; `== None` works but
        # trips linters (E711) and obscures intent.
        BusinessEventLog.license_usage_id.is_(None),
        BusinessEventLog.timestamp <= end_time_stamp,
    ).all())
|
||||
|
||||
|
||||
def get_relevant_license_usages(session, tenant_id, min_timestamp, max_timestamp):
    """Return the tenant's LicenseUsage rows overlapping the log time window.

    A period overlaps when it starts no later than the newest log timestamp
    and ends no earlier than the oldest one. Results are ordered by
    period_start_date.
    """
    overlap_filter = and_(
        LicenseUsage.tenant_id == tenant_id,
        LicenseUsage.period_start_date <= max_timestamp.date(),
        LicenseUsage.period_end_date >= min_timestamp.date(),
    )
    relevant = session.query(LicenseUsage).filter(overlap_filter)
    return relevant.order_by(LicenseUsage.period_start_date).all()
|
||||
|
||||
|
||||
def split_logs_by_license_usage(logs, license_usages):
    """Group logs under the license usage whose period covers each timestamp.

    Returns a dict mapping every usage id to its (possibly empty) list of
    logs. A log matching no period is dropped; a log matching several is
    assigned to the first period in `license_usages`.
    """
    grouped = {usage.id: [] for usage in license_usages}

    for entry in logs:
        entry_day = entry.timestamp.date()
        # First period whose [start, end] range contains the log's date.
        match = next(
            (usage for usage in license_usages
             if usage.period_start_date <= entry_day <= usage.period_end_date),
            None,
        )
        if match is not None:
            grouped[match.id].append(entry)

    return grouped
|
||||
|
||||
|
||||
def process_logs_for_license_usage(tenant_id, license_usage_id, logs):
    """Aggregate usage metrics from `logs` onto the given LicenseUsage record.

    Accumulates embedding and interaction counters from the relevant business
    event log messages, stamps each log's license_usage_id to mark it
    processed, and commits everything in a single transaction.

    Raises:
        ValueError: if the LicenseUsage record does not exist.
        SQLAlchemyError: if the commit fails (session is rolled back first).
    """
    # Retrieve the LicenseUsage record
    license_usage = db.session.query(LicenseUsage).filter_by(id=license_usage_id).first()

    if not license_usage:
        raise ValueError(f"LicenseUsage with id {license_usage_id} not found.")

    # Initialize variables to accumulate usage data
    embedding_mb_used = 0
    embedding_prompt_tokens_used = 0
    embedding_completion_tokens_used = 0
    embedding_total_tokens_used = 0
    interaction_prompt_tokens_used = 0
    interaction_completion_tokens_used = 0
    interaction_total_tokens_used = 0

    # Process each log
    for log in logs:
        # Case for 'Create Embeddings' event
        if log.event_type == 'Create Embeddings':
            if log.message == 'Starting Trace for Create Embeddings':
                # The trace-start record carries the uploaded file's size.
                # NOTE(review): field name says MB but the value is added
                # unconverted — confirm document_version_file_size is in MB.
                embedding_mb_used += log.document_version_file_size
            elif log.message == 'Final LLM Metrics':
                embedding_prompt_tokens_used += log.llm_metrics_prompt_tokens
                embedding_completion_tokens_used += log.llm_metrics_completion_tokens
                embedding_total_tokens_used += log.llm_metrics_total_tokens

        # Case for 'Ask Question' event
        elif log.event_type == 'Ask Question':
            if log.message == 'Final LLM Metrics':
                interaction_prompt_tokens_used += log.llm_metrics_prompt_tokens
                interaction_completion_tokens_used += log.llm_metrics_completion_tokens
                interaction_total_tokens_used += log.llm_metrics_total_tokens

        # Mark the log as processed by setting the license_usage_id
        log.license_usage_id = license_usage_id

    # Update the LicenseUsage record with the accumulated values
    license_usage.embedding_mb_used += embedding_mb_used
    license_usage.embedding_prompt_tokens_used += embedding_prompt_tokens_used
    license_usage.embedding_completion_tokens_used += embedding_completion_tokens_used
    license_usage.embedding_total_tokens_used += embedding_total_tokens_used
    license_usage.interaction_prompt_tokens_used += interaction_prompt_tokens_used
    license_usage.interaction_completion_tokens_used += interaction_completion_tokens_used
    license_usage.interaction_total_tokens_used += interaction_total_tokens_used

    current_app.logger.debug(f"Processed logs for license usage {license_usage.id}:\n{license_usage}")

    # Commit the updates to the LicenseUsage and log records
    try:
        db.session.add(license_usage)
        for log in logs:
            db.session.add(log)
        db.session.commit()
    except SQLAlchemyError as e:
        # Roll back so neither the counters nor the processed-stamps persist
        # partially; caller records the failure per tenant.
        db.session.rollback()
        current_app.logger.error(f"Error trying to update license usage and logs for tenant {tenant_id}: {e}")
        raise e
|
||||
|
||||
|
||||
def recalculate_storage_for_tenant(tenant):
    """Recompute total document storage for `tenant` and clear its dirty flag.

    Sums file_size over document_version rows and writes the result to the
    tenant's LicenseUsage record.

    NOTE(review): the query has no tenant filter — it appears to rely on the
    session already being switched to the tenant's schema; confirm callers
    always do that first.

    Raises:
        SQLAlchemyError: if persisting the update fails (session rolled back).
    """
    # SUM(...) yields NULL (scalar() -> None) on an empty table; coerce to 0
    # so we never write None into storage_mb_used.
    total_storage = db.session.execute(text("""
        SELECT SUM(file_size)
        FROM document_version
    """)).scalar() or 0
    current_app.logger.debug(f"Recalculating storage for tenant {tenant} - Total storage: {total_storage}")

    # Update the LicenseUsage with the recalculated storage.
    # NOTE(review): .first() without an ORDER BY / period filter picks an
    # arbitrary usage row when several exist — confirm this is intended.
    license_usage = db.session.query(LicenseUsage).filter_by(tenant_id=tenant.id).first()
    license_usage.storage_mb_used = total_storage

    # Reset the dirty flag after recalculating
    tenant.storage_dirty = False

    # Commit the changes
    try:
        db.session.add(tenant)
        db.session.add(license_usage)
        db.session.commit()
    except SQLAlchemyError as e:
        db.session.rollback()
        # Include the exception and re-raise (the old code logged without the
        # error detail and swallowed the failure, so the dirty flag silently
        # stayed set while the task reported success).
        current_app.logger.error(f"Error trying to update tenant {tenant.id} for Dirty Storage. "
                                 f"Error: {str(e)}")
        raise e
|
||||
|
||||
|
||||
@@ -1,101 +1,151 @@
|
||||
import io
|
||||
import os
|
||||
import time
|
||||
|
||||
import psutil
|
||||
from pydub import AudioSegment
|
||||
import tempfile
|
||||
from langchain_core.output_parsers import StrOutputParser
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
from langchain_core.runnables import RunnablePassthrough
|
||||
from common.extensions import minio_client
|
||||
from common.utils.model_utils import create_language_template
|
||||
from .processor import Processor
|
||||
import subprocess
|
||||
|
||||
from .transcription_processor import TranscriptionProcessor
|
||||
from common.utils.business_event_context import current_event
|
||||
|
||||
class AudioProcessor(Processor):
|
||||
|
||||
class AudioProcessor(TranscriptionProcessor):
|
||||
def __init__(self, tenant, model_variables, document_version):
|
||||
super().__init__(tenant, model_variables, document_version)
|
||||
self.transcription_client = model_variables['transcription_client']
|
||||
self.transcription_model = model_variables['transcription_model']
|
||||
self.ffmpeg_path = 'ffmpeg'
|
||||
self.max_compression_duration = model_variables['max_compression_duration']
|
||||
self.max_transcription_duration = model_variables['max_transcription_duration']
|
||||
self.compression_cpu_limit = model_variables.get('compression_cpu_limit', 50) # CPU usage limit in percentage
|
||||
self.compression_process_delay = model_variables.get('compression_process_delay', 0.1) # Delay between processing chunks in seconds
|
||||
self.file_type = document_version.file_type
|
||||
|
||||
def _get_transcription(self):
|
||||
file_data = minio_client.download_document_file(
|
||||
self.tenant.id,
|
||||
self.document_version.bucket_name,
|
||||
self.document_version.object_name,
|
||||
)
|
||||
|
||||
def process(self):
|
||||
self._log("Starting Audio processing")
|
||||
try:
|
||||
file_data = minio_client.download_document_file(
|
||||
self.tenant.id,
|
||||
self.document_version.doc_id,
|
||||
self.document_version.language,
|
||||
self.document_version.id,
|
||||
self.document_version.file_name
|
||||
)
|
||||
|
||||
with current_event.create_span("Audio Compression"):
|
||||
compressed_audio = self._compress_audio(file_data)
|
||||
with current_event.create_span("Audio Transcription"):
|
||||
transcription = self._transcribe_audio(compressed_audio)
|
||||
markdown, title = self._generate_markdown_from_transcription(transcription)
|
||||
|
||||
self._save_markdown(markdown)
|
||||
self._log("Finished processing Audio")
|
||||
return markdown, title
|
||||
except Exception as e:
|
||||
self._log(f"Error processing Audio: {str(e)}", level='error')
|
||||
raise
|
||||
return transcription
|
||||
|
||||
def _compress_audio(self, audio_data):
|
||||
self._log("Compressing audio")
|
||||
with tempfile.NamedTemporaryFile(delete=False, suffix=f'.{self.document_version.file_type}') as temp_input:
|
||||
temp_input.write(audio_data)
|
||||
temp_input.flush()
|
||||
|
||||
# Use a unique filename for the output to avoid conflicts
|
||||
output_filename = f'compressed_{os.urandom(8).hex()}.mp3'
|
||||
output_path = os.path.join(tempfile.gettempdir(), output_filename)
|
||||
with tempfile.NamedTemporaryFile(delete=False, suffix=f'.{self.document_version.file_type}') as temp_file:
|
||||
temp_file.write(audio_data)
|
||||
temp_file_path = temp_file.name
|
||||
|
||||
try:
|
||||
result = subprocess.run(
|
||||
[self.ffmpeg_path, '-y', '-i', temp_input.name, '-b:a', '64k', '-f', 'mp3', output_path],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
check=True
|
||||
try:
|
||||
self._log("Creating AudioSegment from file")
|
||||
audio_info = AudioSegment.from_file(temp_file_path, format=self.document_version.file_type)
|
||||
self._log("Finished creating AudioSegment from file")
|
||||
total_duration = len(audio_info)
|
||||
self._log(f"Audio duration: {total_duration / 1000} seconds")
|
||||
|
||||
segment_length = self.max_compression_duration * 1000 # Convert to milliseconds
|
||||
total_chunks = (total_duration + segment_length - 1) // segment_length
|
||||
|
||||
compressed_segments = AudioSegment.empty()
|
||||
|
||||
for i in range(total_chunks):
|
||||
self._log(f"Compressing segment {i + 1} of {total_chunks}")
|
||||
|
||||
start_time = i * segment_length
|
||||
end_time = min((i + 1) * segment_length, total_duration)
|
||||
|
||||
chunk = AudioSegment.from_file(
|
||||
temp_file_path,
|
||||
format=self.document_version.file_type,
|
||||
start_second=start_time / 1000,
|
||||
duration=(end_time - start_time) / 1000
|
||||
)
|
||||
|
||||
with open(output_path, 'rb') as f:
|
||||
compressed_data = f.read()
|
||||
compressed_chunk = self._compress_segment(chunk)
|
||||
compressed_segments += compressed_chunk
|
||||
|
||||
# Save compressed audio to MinIO
|
||||
compressed_filename = f"{self.document_version.id}_compressed.mp3"
|
||||
time.sleep(self.compression_process_delay)
|
||||
|
||||
# Save compressed audio to MinIO
|
||||
compressed_filename = f"{self.document_version.id}_compressed.mp3"
|
||||
with io.BytesIO() as compressed_buffer:
|
||||
compressed_segments.export(compressed_buffer, format="mp3")
|
||||
compressed_buffer.seek(0)
|
||||
minio_client.upload_document_file(
|
||||
self.tenant.id,
|
||||
self.document_version.doc_id,
|
||||
self.document_version.language,
|
||||
self.document_version.id,
|
||||
compressed_filename,
|
||||
compressed_data
|
||||
compressed_buffer.read()
|
||||
)
|
||||
self._log(f"Saved compressed audio to MinIO: {compressed_filename}")
|
||||
self._log(f"Saved compressed audio to MinIO: {compressed_filename}")
|
||||
|
||||
return compressed_data
|
||||
return compressed_segments
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
error_message = f"Compression failed: {e.stderr}"
|
||||
self._log(error_message, level='error')
|
||||
raise Exception(error_message)
|
||||
except Exception as e:
|
||||
self._log(f"Error during audio processing: {str(e)}", level='error')
|
||||
raise
|
||||
finally:
|
||||
os.unlink(temp_file_path) # Ensure the temporary file is deleted
|
||||
|
||||
finally:
|
||||
# Clean up temporary files
|
||||
os.unlink(temp_input.name)
|
||||
if os.path.exists(output_path):
|
||||
os.unlink(output_path)
|
||||
def _compress_segment(self, audio_segment):
|
||||
with io.BytesIO() as segment_buffer:
|
||||
audio_segment.export(segment_buffer, format="wav")
|
||||
segment_buffer.seek(0)
|
||||
|
||||
with io.BytesIO() as output_buffer:
|
||||
command = [
|
||||
'nice', '-n', '19',
|
||||
'ffmpeg',
|
||||
'-i', 'pipe:0',
|
||||
'-ar', '16000',
|
||||
'-ac', '1',
|
||||
'-b:a', '32k',
|
||||
'-filter:a', 'loudnorm',
|
||||
'-f', 'mp3',
|
||||
'pipe:1'
|
||||
]
|
||||
|
||||
process = psutil.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
stdout, stderr = process.communicate(input=segment_buffer.read())
|
||||
|
||||
if process.returncode != 0:
|
||||
self._log(f"FFmpeg error: {stderr.decode()}", level='error')
|
||||
raise Exception("FFmpeg compression failed")
|
||||
|
||||
output_buffer.write(stdout)
|
||||
output_buffer.seek(0)
|
||||
compressed_segment = AudioSegment.from_mp3(output_buffer)
|
||||
|
||||
return compressed_segment
|
||||
|
||||
def _transcribe_audio(self, audio_data):
|
||||
self._log("Starting audio transcription")
|
||||
audio = AudioSegment.from_file(io.BytesIO(audio_data), format="mp3")
|
||||
# audio = AudioSegment.from_file(io.BytesIO(audio_data), format="mp3")
|
||||
audio = audio_data
|
||||
|
||||
segment_length = 10 * 60 * 1000 # 10 minutes in milliseconds
|
||||
segment_length = self.max_transcription_duration * 1000 # calculate milliseconds
|
||||
transcriptions = []
|
||||
total_chunks = len(audio) // segment_length + 1
|
||||
|
||||
for i, chunk in enumerate(audio[::segment_length]):
|
||||
self._log(f'Processing chunk {i + 1} of {len(audio) // segment_length + 1}')
|
||||
self._log(f'Processing chunk {i + 1} of {total_chunks}')
|
||||
segment_duration = 0
|
||||
if i == total_chunks - 1:
|
||||
segment_duration = (len(audio) % segment_length) // 1000
|
||||
else:
|
||||
segment_duration = self.max_transcription_duration
|
||||
|
||||
with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_audio:
|
||||
chunk.export(temp_audio.name, format="mp3")
|
||||
@@ -111,11 +161,12 @@ class AudioProcessor(Processor):
|
||||
audio_file.seek(0) # Reset file pointer to the beginning
|
||||
|
||||
self._log("Calling transcription API")
|
||||
transcription = self.transcription_client.audio.transcriptions.create(
|
||||
transcription = self.model_variables.transcribe(
|
||||
file=audio_file,
|
||||
model=self.transcription_model,
|
||||
language=self.document_version.language,
|
||||
response_format='verbose_json',
|
||||
duration=segment_duration,
|
||||
)
|
||||
self._log("Transcription API call completed")
|
||||
|
||||
@@ -159,29 +210,3 @@ class AudioProcessor(Processor):
|
||||
|
||||
return full_transcription
|
||||
|
||||
def _generate_markdown_from_transcription(self, transcription):
|
||||
self._log("Generating markdown from transcription")
|
||||
llm = self.model_variables['llm']
|
||||
template = self.model_variables['transcript_template']
|
||||
language_template = create_language_template(template, self.document_version.language)
|
||||
transcript_prompt = ChatPromptTemplate.from_template(language_template)
|
||||
setup = RunnablePassthrough()
|
||||
output_parser = StrOutputParser()
|
||||
|
||||
chain = setup | transcript_prompt | llm | output_parser
|
||||
|
||||
input_transcript = {'transcript': transcription}
|
||||
markdown = chain.invoke(input_transcript)
|
||||
|
||||
# Extract title from the markdown
|
||||
title = self._extract_title_from_markdown(markdown)
|
||||
|
||||
return markdown, title
|
||||
|
||||
def _extract_title_from_markdown(self, markdown):
|
||||
# Simple extraction of the first header as the title
|
||||
lines = markdown.split('\n')
|
||||
for line in lines:
|
||||
if line.startswith('# '):
|
||||
return line[2:].strip()
|
||||
return "Untitled Audio Transcription"
|
||||
|
||||
@@ -5,6 +5,7 @@ from langchain_core.runnables import RunnablePassthrough
|
||||
from common.extensions import db, minio_client
|
||||
from common.utils.model_utils import create_language_template
|
||||
from .processor import Processor
|
||||
from common.utils.business_event_context import current_event
|
||||
|
||||
|
||||
class HTMLProcessor(Processor):
|
||||
@@ -14,21 +15,24 @@ class HTMLProcessor(Processor):
|
||||
self.html_end_tags = model_variables['html_end_tags']
|
||||
self.html_included_elements = model_variables['html_included_elements']
|
||||
self.html_excluded_elements = model_variables['html_excluded_elements']
|
||||
self.chunk_size = model_variables['processing_chunk_size'] # Adjust this based on your LLM's optimal input size
|
||||
self.chunk_overlap = model_variables[
|
||||
'processing_chunk_overlap'] # Adjust for context preservation between chunks
|
||||
|
||||
def process(self):
|
||||
self._log("Starting HTML processing")
|
||||
try:
|
||||
file_data = minio_client.download_document_file(
|
||||
self.tenant.id,
|
||||
self.document_version.doc_id,
|
||||
self.document_version.language,
|
||||
self.document_version.id,
|
||||
self.document_version.file_name
|
||||
self.document_version.bucket_name,
|
||||
self.document_version.object_name,
|
||||
)
|
||||
html_content = file_data.decode('utf-8')
|
||||
|
||||
extracted_html, title = self._parse_html(html_content)
|
||||
markdown = self._generate_markdown_from_html(extracted_html)
|
||||
with current_event.create_span("HTML Content Extraction"):
|
||||
extracted_html, title = self._parse_html(html_content)
|
||||
with current_event.create_span("Markdown Generation"):
|
||||
markdown = self._generate_markdown_from_html(extracted_html)
|
||||
|
||||
self._save_markdown(markdown)
|
||||
self._log("Finished processing HTML")
|
||||
@@ -70,7 +74,7 @@ class HTMLProcessor(Processor):
|
||||
chain = setup | parse_prompt | llm | output_parser
|
||||
|
||||
soup = BeautifulSoup(html_content, 'lxml')
|
||||
chunks = self._split_content(soup)
|
||||
chunks = self._split_content(soup, self.chunk_size)
|
||||
|
||||
markdown_chunks = []
|
||||
for chunk in chunks:
|
||||
@@ -115,9 +119,10 @@ class HTMLProcessor(Processor):
|
||||
|
||||
def _parse_excluded_classes(self, excluded_classes):
|
||||
parsed = {}
|
||||
for rule in excluded_classes:
|
||||
element, cls = rule.split('.', 1)
|
||||
parsed.setdefault(element, set()).add(cls)
|
||||
if excluded_classes:
|
||||
for rule in excluded_classes:
|
||||
element, cls = rule.split('.', 1)
|
||||
parsed.setdefault(element, set()).add(cls)
|
||||
return parsed
|
||||
|
||||
def _should_exclude_element(self, element, excluded_classes):
|
||||
|
||||
@@ -10,35 +10,35 @@ from langchain_core.runnables import RunnablePassthrough
|
||||
from common.extensions import minio_client
|
||||
from common.utils.model_utils import create_language_template
|
||||
from .processor import Processor
|
||||
from common.utils.business_event_context import current_event
|
||||
|
||||
|
||||
class PDFProcessor(Processor):
|
||||
def __init__(self, tenant, model_variables, document_version):
|
||||
super().__init__(tenant, model_variables, document_version)
|
||||
# PDF-specific initialization
|
||||
self.chunk_size = model_variables['PDF_chunk_size']
|
||||
self.chunk_overlap = model_variables['PDF_chunk_overlap']
|
||||
self.min_chunk_size = model_variables['PDF_min_chunk_size']
|
||||
self.max_chunk_size = model_variables['PDF_max_chunk_size']
|
||||
self.chunk_size = model_variables['processing_chunk_size']
|
||||
self.chunk_overlap = model_variables['processing_chunk_overlap']
|
||||
self.min_chunk_size = model_variables['processing_min_chunk_size']
|
||||
self.max_chunk_size = model_variables['processing_max_chunk_size']
|
||||
|
||||
def process(self):
|
||||
self._log("Starting PDF processing")
|
||||
try:
|
||||
file_data = minio_client.download_document_file(
|
||||
self.tenant.id,
|
||||
self.document_version.doc_id,
|
||||
self.document_version.language,
|
||||
self.document_version.id,
|
||||
self.document_version.file_name
|
||||
self.document_version.bucket_name,
|
||||
self.document_version.object_name,
|
||||
)
|
||||
|
||||
extracted_content = self._extract_content(file_data)
|
||||
structured_content, title = self._structure_content(extracted_content)
|
||||
with current_event.create_span("PDF Extraction"):
|
||||
extracted_content = self._extract_content(file_data)
|
||||
structured_content, title = self._structure_content(extracted_content)
|
||||
|
||||
llm_chunks = self._split_content_for_llm(structured_content)
|
||||
markdown = self._process_chunks_with_llm(llm_chunks)
|
||||
|
||||
self._save_markdown(markdown)
|
||||
with current_event.create_span("Markdown Generation"):
|
||||
llm_chunks = self._split_content_for_llm(structured_content)
|
||||
markdown = self._process_chunks_with_llm(llm_chunks)
|
||||
self._save_markdown(markdown)
|
||||
self._log("Finished processing PDF")
|
||||
return markdown, title
|
||||
except Exception as e:
|
||||
@@ -228,12 +228,7 @@ class PDFProcessor(Processor):
|
||||
for chunk in chunks:
|
||||
input = {"pdf_content": chunk}
|
||||
result = chain.invoke(input)
|
||||
# Remove Markdown code block delimiters if present
|
||||
result = result.strip()
|
||||
if result.startswith("```markdown"):
|
||||
result = result[len("```markdown"):].strip()
|
||||
if result.endswith("```"):
|
||||
result = result[:-3].strip()
|
||||
result = self._clean_markdown(result)
|
||||
markdown_chunks.append(result)
|
||||
|
||||
return "\n\n".join(markdown_chunks)
|
||||
|
||||
@@ -40,3 +40,13 @@ class Processor(ABC):
|
||||
filename,
|
||||
content.encode('utf-8')
|
||||
)
|
||||
|
||||
def _clean_markdown(self, markdown):
|
||||
markdown = markdown.strip()
|
||||
if markdown.startswith("```markdown"):
|
||||
markdown = markdown[len("```markdown"):].strip()
|
||||
if markdown.endswith("```"):
|
||||
markdown = markdown[:-3].strip()
|
||||
|
||||
return markdown
|
||||
|
||||
|
||||
@@ -1,37 +1,17 @@
|
||||
import re
|
||||
from langchain_core.output_parsers import StrOutputParser
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
from langchain_core.runnables import RunnablePassthrough
|
||||
from common.extensions import minio_client
|
||||
from common.utils.model_utils import create_language_template
|
||||
from .processor import Processor
|
||||
from .transcription_processor import TranscriptionProcessor
|
||||
import re
|
||||
|
||||
|
||||
class SRTProcessor(Processor):
|
||||
def __init__(self, tenant, model_variables, document_version):
|
||||
super().__init__(tenant, model_variables, document_version)
|
||||
|
||||
def process(self):
|
||||
self._log("Starting SRT processing")
|
||||
try:
|
||||
file_data = minio_client.download_document_file(
|
||||
self.tenant.id,
|
||||
self.document_version.doc_id,
|
||||
self.document_version.language,
|
||||
self.document_version.id,
|
||||
self.document_version.file_name
|
||||
)
|
||||
|
||||
srt_content = file_data.decode('utf-8')
|
||||
cleaned_transcription = self._clean_srt(srt_content)
|
||||
markdown, title = self._generate_markdown_from_transcription(cleaned_transcription)
|
||||
|
||||
self._save_markdown(markdown)
|
||||
self._log("Finished processing SRT")
|
||||
return markdown, title
|
||||
except Exception as e:
|
||||
self._log(f"Error processing SRT: {str(e)}", level='error')
|
||||
raise
|
||||
class SRTProcessor(TranscriptionProcessor):
|
||||
def _get_transcription(self):
|
||||
file_data = minio_client.download_document_file(
|
||||
self.tenant.id,
|
||||
self.document_version.bucket_name,
|
||||
self.document_version.object_name,
|
||||
)
|
||||
srt_content = file_data.decode('utf-8')
|
||||
return self._clean_srt(srt_content)
|
||||
|
||||
def _clean_srt(self, srt_content):
|
||||
# Remove timecodes and subtitle numbers
|
||||
@@ -50,31 +30,3 @@ class SRTProcessor(Processor):
|
||||
|
||||
return cleaned_text
|
||||
|
||||
def _generate_markdown_from_transcription(self, transcription):
|
||||
self._log("Generating markdown from transcription")
|
||||
llm = self.model_variables['llm']
|
||||
template = self.model_variables['transcript_template']
|
||||
language_template = create_language_template(template, self.document_version.language)
|
||||
transcript_prompt = ChatPromptTemplate.from_template(language_template)
|
||||
setup = RunnablePassthrough()
|
||||
output_parser = StrOutputParser()
|
||||
|
||||
chain = setup | transcript_prompt | llm | output_parser
|
||||
|
||||
input_transcript = {'transcript': transcription}
|
||||
markdown = chain.invoke(input_transcript)
|
||||
|
||||
# Extract title from the markdown
|
||||
title = self._extract_title_from_markdown(markdown)
|
||||
|
||||
return markdown, title
|
||||
|
||||
def _extract_title_from_markdown(self, markdown):
|
||||
# Simple extraction of the first header as the title
|
||||
lines = markdown.split('\n')
|
||||
for line in lines:
|
||||
if line.startswith('# '):
|
||||
return line[2:].strip()
|
||||
return "Untitled SRT Transcription"
|
||||
|
||||
|
||||
|
||||
94
eveai_workers/Processors/transcription_processor.py
Normal file
94
eveai_workers/Processors/transcription_processor.py
Normal file
@@ -0,0 +1,94 @@
|
||||
# transcription_processor.py
|
||||
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
||||
from langchain_core.output_parsers import StrOutputParser
|
||||
from langchain_core.prompts import ChatPromptTemplate
|
||||
from langchain_core.runnables import RunnablePassthrough
|
||||
|
||||
from common.utils.model_utils import create_language_template
|
||||
from .processor import Processor
|
||||
from common.utils.business_event_context import current_event
|
||||
|
||||
|
||||
class TranscriptionProcessor(Processor):
    """Base processor for document types that yield a raw transcription.

    Subclasses implement _get_transcription(); this class chunks the
    transcription, converts each chunk to markdown through the configured
    LLM chain, and stitches the chunks back together.
    """

    def __init__(self, tenant, model_variables, document_version):
        super().__init__(tenant, model_variables, document_version)
        # Chunking parameters for splitting the transcription before the LLM.
        self.chunk_size = model_variables['processing_chunk_size']
        self.chunk_overlap = model_variables['processing_chunk_overlap']

    def process(self):
        """Run the transcription -> markdown pipeline.

        Returns:
            (full_markdown, title) — the combined markdown and the title
            extracted from its first level-1 header.
        """
        self._log("Starting Transcription processing")
        try:
            with current_event.create_span("Transcription Generation"):
                transcription = self._get_transcription()
            with current_event.create_span("Markdown Generation"):
                chunks = self._chunk_transcription(transcription)
                markdown_chunks = self._process_chunks(chunks)
                full_markdown = self._combine_markdown_chunks(markdown_chunks)
            self._save_markdown(full_markdown)
            self._log("Finished processing Transcription")
            return full_markdown, self._extract_title_from_markdown(full_markdown)
        except Exception as e:
            self._log(f"Error processing Transcription: {str(e)}", level='error')
            raise

    def _get_transcription(self):
        """Produce the raw transcription text. Must be implemented by subclasses."""
        raise NotImplementedError

    def _chunk_transcription(self, transcription):
        """Split the transcription into overlapping chunks sized for the LLM."""
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.chunk_size,
            chunk_overlap=self.chunk_overlap,
            length_function=len,
            separators=["\n\n", "\n", " ", ""]
        )
        return text_splitter.split_text(transcription)

    def _process_chunks(self, chunks):
        """Convert each transcription chunk to markdown, carrying context forward.

        The tail of each generated chunk (from its last markdown header
        onward) is passed to the next invocation as 'previous_part' so the
        LLM keeps continuity across chunk boundaries.
        """
        self._log("Generating markdown from transcription")
        llm = self.model_variables['llm']
        template = self.model_variables['transcript_template']
        language_template = create_language_template(template, self.document_version.language)
        transcript_prompt = ChatPromptTemplate.from_template(language_template)
        setup = RunnablePassthrough()
        output_parser = StrOutputParser()

        chain = setup | transcript_prompt | llm | output_parser

        markdown_chunks = []
        previous_part = ""
        for i, chunk in enumerate(chunks):
            self._log(f"Processing chunk {i + 1} of {len(chunks)}")
            self._log(f"Previous part: {previous_part}")
            input_transcript = {
                'transcript': chunk,
                'previous_part': previous_part
            }
            markdown = chain.invoke(input_transcript)
            markdown = self._clean_markdown(markdown)
            markdown_chunks.append(markdown)

            # Extract the tail (from the LAST header onward) for the next
            # iteration. Bug fix: the previous code located the header with
            # list.index(), which returns the FIRST occurrence and grabbed far
            # too much text whenever the same header line appeared twice.
            lines = markdown.split('\n')
            for idx in range(len(lines) - 1, -1, -1):
                if lines[idx].startswith('#'):
                    previous_part = '\n'.join(lines[idx:])
                    break
            else:
                previous_part = lines[-1] if lines else ""

        return markdown_chunks

    def _combine_markdown_chunks(self, markdown_chunks):
        """Join the per-chunk markdown pieces with blank lines between them."""
        return "\n\n".join(markdown_chunks)

    def _extract_title_from_markdown(self, markdown):
        """Return the first level-1 header's text, or a fallback title."""
        lines = markdown.split('\n')
        for line in lines:
            if line.startswith('# '):
                return line[2:].strip()
        return "Untitled Transcription"
|
||||
@@ -44,3 +44,4 @@ def register_extensions(app):
|
||||
|
||||
|
||||
app, celery = create_app()
|
||||
|
||||
|
||||
@@ -24,11 +24,18 @@ from eveai_workers.Processors.html_processor import HTMLProcessor
|
||||
from eveai_workers.Processors.pdf_processor import PDFProcessor
|
||||
from eveai_workers.Processors.srt_processor import SRTProcessor
|
||||
|
||||
from common.utils.business_event import BusinessEvent
|
||||
from common.utils.business_event_context import current_event
|
||||
|
||||
|
||||
# Healthcheck task
|
||||
@current_celery.task(name='ping', queue='embeddings')
|
||||
def ping():
|
||||
return 'pong'
|
||||
|
||||
|
||||
@current_celery.task(name='create_embeddings', queue='embeddings')
|
||||
def create_embeddings(tenant_id, document_version_id):
|
||||
current_app.logger.info(f'Creating embeddings for tenant {tenant_id} on document version {document_version_id}.')
|
||||
|
||||
try:
|
||||
# Retrieve Tenant for which we are processing
|
||||
tenant = Tenant.query.get(tenant_id)
|
||||
@@ -38,15 +45,15 @@ def create_embeddings(tenant_id, document_version_id):
|
||||
# Ensure we are working in the correct database schema
|
||||
Database(tenant_id).switch_schema()
|
||||
|
||||
# Select variables to work with depending on tenant and model
|
||||
model_variables = select_model_variables(tenant)
|
||||
current_app.logger.debug(f'Model variables: {model_variables}')
|
||||
|
||||
# Retrieve document version to process
|
||||
document_version = DocumentVersion.query.get(document_version_id)
|
||||
if document_version is None:
|
||||
raise Exception(f'Document version {document_version_id} not found')
|
||||
|
||||
# Select variables to work with depending on tenant and model
|
||||
model_variables = select_model_variables(tenant)
|
||||
current_app.logger.debug(f'Model variables: {model_variables}')
|
||||
|
||||
except Exception as e:
|
||||
current_app.logger.error(f'Create Embeddings request received '
|
||||
f'for non existing document version {document_version_id} '
|
||||
@@ -54,49 +61,56 @@ def create_embeddings(tenant_id, document_version_id):
|
||||
f'error: {e}')
|
||||
raise
|
||||
|
||||
try:
|
||||
db.session.add(document_version)
|
||||
# BusinessEvent creates a context, which is why we need to use it with a with block
|
||||
with BusinessEvent('Create Embeddings', tenant_id,
|
||||
document_version_id=document_version_id,
|
||||
document_version_file_size=document_version.file_size):
|
||||
current_app.logger.info(f'Creating embeddings for tenant {tenant_id} on document version {document_version_id}')
|
||||
|
||||
# start processing
|
||||
document_version.processing = True
|
||||
document_version.processing_started_at = dt.now(tz.utc)
|
||||
document_version.processing_finished_at = None
|
||||
document_version.processing_error = None
|
||||
try:
|
||||
db.session.add(document_version)
|
||||
|
||||
db.session.commit()
|
||||
except SQLAlchemyError as e:
|
||||
current_app.logger.error(f'Unable to save Embedding status information '
|
||||
f'in document version {document_version_id} '
|
||||
f'for tenant {tenant_id}')
|
||||
raise
|
||||
# start processing
|
||||
document_version.processing = True
|
||||
document_version.processing_started_at = dt.now(tz.utc)
|
||||
document_version.processing_finished_at = None
|
||||
document_version.processing_error = None
|
||||
|
||||
delete_embeddings_for_document_version(document_version)
|
||||
db.session.commit()
|
||||
except SQLAlchemyError as e:
|
||||
current_app.logger.error(f'Unable to save Embedding status information '
|
||||
f'in document version {document_version_id} '
|
||||
f'for tenant {tenant_id}')
|
||||
raise
|
||||
|
||||
try:
|
||||
match document_version.file_type:
|
||||
case 'pdf':
|
||||
process_pdf(tenant, model_variables, document_version)
|
||||
case 'html':
|
||||
process_html(tenant, model_variables, document_version)
|
||||
case 'srt':
|
||||
process_srt(tenant, model_variables, document_version)
|
||||
case 'mp4' | 'mp3' | 'ogg':
|
||||
process_audio(tenant, model_variables, document_version)
|
||||
case _:
|
||||
raise Exception(f'No functionality defined for file type {document_version.file_type} '
|
||||
f'for tenant {tenant_id} '
|
||||
f'while creating embeddings for document version {document_version_id}')
|
||||
delete_embeddings_for_document_version(document_version)
|
||||
|
||||
except Exception as e:
|
||||
current_app.logger.error(f'Error creating embeddings for tenant {tenant_id} '
|
||||
f'on document version {document_version_id} '
|
||||
f'error: {e}')
|
||||
document_version.processing = False
|
||||
document_version.processing_finished_at = dt.now(tz.utc)
|
||||
document_version.processing_error = str(e)[:255]
|
||||
db.session.commit()
|
||||
create_embeddings.update_state(state=states.FAILURE)
|
||||
raise
|
||||
try:
|
||||
match document_version.file_type:
|
||||
case 'pdf':
|
||||
process_pdf(tenant, model_variables, document_version)
|
||||
case 'html':
|
||||
process_html(tenant, model_variables, document_version)
|
||||
case 'srt':
|
||||
process_srt(tenant, model_variables, document_version)
|
||||
case 'mp4' | 'mp3' | 'ogg':
|
||||
process_audio(tenant, model_variables, document_version)
|
||||
case _:
|
||||
raise Exception(f'No functionality defined for file type {document_version.file_type} '
|
||||
f'for tenant {tenant_id} '
|
||||
f'while creating embeddings for document version {document_version_id}')
|
||||
current_event.log("Finished Embedding Creation Task")
|
||||
|
||||
except Exception as e:
|
||||
current_app.logger.error(f'Error creating embeddings for tenant {tenant_id} '
|
||||
f'on document version {document_version_id} '
|
||||
f'error: {e}')
|
||||
document_version.processing = False
|
||||
document_version.processing_finished_at = dt.now(tz.utc)
|
||||
document_version.processing_error = str(e)[:255]
|
||||
db.session.commit()
|
||||
create_embeddings.update_state(state=states.FAILURE)
|
||||
raise
|
||||
|
||||
|
||||
def delete_embeddings_for_document_version(document_version):
|
||||
@@ -112,35 +126,43 @@ def delete_embeddings_for_document_version(document_version):
|
||||
|
||||
|
||||
def process_pdf(tenant, model_variables, document_version):
|
||||
processor = PDFProcessor(tenant, model_variables, document_version)
|
||||
markdown, title = processor.process()
|
||||
with current_event.create_span("PDF Processing"):
|
||||
processor = PDFProcessor(tenant, model_variables, document_version)
|
||||
markdown, title = processor.process()
|
||||
|
||||
# Process markdown and embed
|
||||
embed_markdown(tenant, model_variables, document_version, markdown, title)
|
||||
with current_event.create_span("Embedding"):
|
||||
embed_markdown(tenant, model_variables, document_version, markdown, title)
|
||||
|
||||
|
||||
def process_html(tenant, model_variables, document_version):
|
||||
processor = HTMLProcessor(tenant, model_variables, document_version)
|
||||
markdown, title = processor.process()
|
||||
with current_event.create_span("HTML Processing"):
|
||||
processor = HTMLProcessor(tenant, model_variables, document_version)
|
||||
markdown, title = processor.process()
|
||||
|
||||
# Process markdown and embed
|
||||
embed_markdown(tenant, model_variables, document_version, markdown, title)
|
||||
with current_event.create_span("Embedding"):
|
||||
embed_markdown(tenant, model_variables, document_version, markdown, title)
|
||||
|
||||
|
||||
def process_audio(tenant, model_variables, document_version):
|
||||
processor = AudioProcessor(tenant, model_variables, document_version)
|
||||
markdown, title = processor.process()
|
||||
with current_event.create_span("Audio Processing"):
|
||||
processor = AudioProcessor(tenant, model_variables, document_version)
|
||||
markdown, title = processor.process()
|
||||
|
||||
# Process markdown and embed
|
||||
embed_markdown(tenant, model_variables, document_version, markdown, title)
|
||||
with current_event.create_span("Embedding"):
|
||||
embed_markdown(tenant, model_variables, document_version, markdown, title)
|
||||
|
||||
|
||||
def process_srt(tenant, model_variables, document_version):
|
||||
processor = SRTProcessor(tenant, model_variables, document_version)
|
||||
markdown, title = processor.process()
|
||||
with current_event.create_span("SRT Processing"):
|
||||
processor = SRTProcessor(tenant, model_variables, document_version)
|
||||
markdown, title = processor.process()
|
||||
|
||||
# Process markdown and embed
|
||||
embed_markdown(tenant, model_variables, document_version, markdown, title)
|
||||
with current_event.create_span("Embedding"):
|
||||
embed_markdown(tenant, model_variables, document_version, markdown, title)
|
||||
|
||||
|
||||
def embed_markdown(tenant, model_variables, document_version, markdown, title):
|
||||
@@ -152,10 +174,12 @@ def embed_markdown(tenant, model_variables, document_version, markdown, title):
|
||||
model_variables['max_chunk_size'])
|
||||
|
||||
# Enrich chunks
|
||||
enriched_chunks = enrich_chunks(tenant, model_variables, document_version, title, chunks)
|
||||
with current_event.create_span("Enrich Chunks"):
|
||||
enriched_chunks = enrich_chunks(tenant, model_variables, document_version, title, chunks)
|
||||
|
||||
# Create embeddings
|
||||
embeddings = embed_chunks(tenant, model_variables, document_version, enriched_chunks)
|
||||
with current_event.create_span("Create Embeddings"):
|
||||
embeddings = embed_chunks(tenant, model_variables, document_version, enriched_chunks)
|
||||
|
||||
# Update document version and save embeddings
|
||||
try:
|
||||
@@ -182,16 +206,23 @@ def enrich_chunks(tenant, model_variables, document_version, title, chunks):
|
||||
if len(chunks) > 1:
|
||||
summary = summarize_chunk(tenant, model_variables, document_version, chunks[0])
|
||||
|
||||
chunk_total_context = (f'Filename: {document_version.file_name}\n'
|
||||
chunk_total_context = (f'Filename: {document_version.object_name}\n'
|
||||
f'User Context:\n{document_version.user_context}\n\n'
|
||||
f'User Metadata:\n{document_version.user_metadata}\n\n'
|
||||
f'Title: {title}\n'
|
||||
f'{summary}\n'
|
||||
f'{document_version.system_context}\n\n')
|
||||
f'Summary:\n{summary}\n'
|
||||
f'System Context:\n{document_version.system_context}\n\n'
|
||||
f'System Metadata:\n{document_version.system_metadata}\n\n'
|
||||
)
|
||||
enriched_chunks = []
|
||||
initial_chunk = (f'Filename: {document_version.file_name}\n'
|
||||
initial_chunk = (f'Filename: {document_version.object_name}\n'
|
||||
f'User Context:\n{document_version.user_context}\n\n'
|
||||
f'User Metadata:\n{document_version.user_metadata}\n\n'
|
||||
f'Title: {title}\n'
|
||||
f'{chunks[0]}')
|
||||
f'System Context:\n{document_version.system_context}\n\n'
|
||||
f'System Metadata:\n{document_version.system_metadata}\n\n'
|
||||
f'{chunks[0]}'
|
||||
)
|
||||
|
||||
enriched_chunks.append(initial_chunk)
|
||||
for chunk in chunks[1:]:
|
||||
@@ -205,6 +236,7 @@ def enrich_chunks(tenant, model_variables, document_version, title, chunks):
|
||||
|
||||
|
||||
def summarize_chunk(tenant, model_variables, document_version, chunk):
|
||||
current_event.log("Starting Summarizing Chunk")
|
||||
current_app.logger.debug(f'Summarizing chunk for tenant {tenant.id} '
|
||||
f'on document version {document_version.id}')
|
||||
llm = model_variables['llm']
|
||||
@@ -222,6 +254,7 @@ def summarize_chunk(tenant, model_variables, document_version, chunk):
|
||||
summary = chain.invoke({"text": chunk})
|
||||
current_app.logger.debug(f'Finished summarizing chunk for tenant {tenant.id} '
|
||||
f'on document version {document_version.id}.')
|
||||
current_event.log("Finished Summarizing Chunk")
|
||||
return summary
|
||||
except LangChainException as e:
|
||||
current_app.logger.error(f'Error creating summary for chunk enrichment for tenant {tenant.id} '
|
||||
@@ -255,6 +288,8 @@ def embed_chunks(tenant, model_variables, document_version, chunks):
|
||||
new_embedding.embedding = embedding
|
||||
new_embeddings.append(new_embedding)
|
||||
|
||||
current_app.logger.debug(f'Finished embedding chunks for tenant {tenant.id} ')
|
||||
|
||||
return new_embeddings
|
||||
|
||||
|
||||
@@ -268,254 +303,15 @@ def log_parsing_info(tenant, tags, included_elements, excluded_elements, exclude
|
||||
current_app.embed_tuning_logger.debug(f'First element to parse: {elements_to_parse[0]}')
|
||||
|
||||
|
||||
# def process_youtube(tenant, model_variables, document_version):
|
||||
# download_file_name = f'{document_version.id}.mp4'
|
||||
# compressed_file_name = f'{document_version.id}.mp3'
|
||||
# transcription_file_name = f'{document_version.id}.txt'
|
||||
# markdown_file_name = f'{document_version.id}.md'
|
||||
#
|
||||
# # Remove existing files (in case of a re-processing of the file
|
||||
# minio_client.delete_document_file(tenant.id, document_version.doc_id, document_version.language,
|
||||
# document_version.id, download_file_name)
|
||||
# minio_client.delete_document_file(tenant.id, document_version.doc_id, document_version.language,
|
||||
# document_version.id, compressed_file_name)
|
||||
# minio_client.delete_document_file(tenant.id, document_version.doc_id, document_version.language,
|
||||
# document_version.id, transcription_file_name)
|
||||
# minio_client.delete_document_file(tenant.id, document_version.doc_id, document_version.language,
|
||||
# document_version.id, markdown_file_name)
|
||||
#
|
||||
# of, title, description, author = download_youtube(document_version.url, tenant.id, document_version,
|
||||
# download_file_name)
|
||||
# document_version.system_context = f'Title: {title}\nDescription: {description}\nAuthor: {author}'
|
||||
# compress_audio(tenant.id, document_version, download_file_name, compressed_file_name)
|
||||
# transcribe_audio(tenant.id, document_version, compressed_file_name, transcription_file_name, model_variables)
|
||||
# annotate_transcription(tenant, document_version, transcription_file_name, markdown_file_name, model_variables)
|
||||
#
|
||||
# potential_chunks = create_potential_chunks_for_markdown(tenant.id, document_version, markdown_file_name)
|
||||
# actual_chunks = combine_chunks_for_markdown(potential_chunks, model_variables['min_chunk_size'],
|
||||
# model_variables['max_chunk_size'])
|
||||
#
|
||||
# enriched_chunks = enrich_chunks(tenant, document_version, actual_chunks)
|
||||
# embeddings = embed_chunks(tenant, model_variables, document_version, enriched_chunks)
|
||||
#
|
||||
# try:
|
||||
# db.session.add(document_version)
|
||||
# document_version.processing_finished_at = dt.now(tz.utc)
|
||||
# document_version.processing = False
|
||||
# db.session.add_all(embeddings)
|
||||
# db.session.commit()
|
||||
# except SQLAlchemyError as e:
|
||||
# current_app.logger.error(f'Error saving embedding information for tenant {tenant.id} '
|
||||
# f'on Youtube document version {document_version.id}'
|
||||
# f'error: {e}')
|
||||
# raise
|
||||
#
|
||||
# current_app.logger.info(f'Embeddings created successfully for tenant {tenant.id} '
|
||||
# f'on Youtube document version {document_version.id} :-)')
|
||||
#
|
||||
#
|
||||
# def download_youtube(url, tenant_id, document_version, file_name):
|
||||
# try:
|
||||
# current_app.logger.info(f'Downloading YouTube video: {url} for tenant: {tenant_id}')
|
||||
# yt = YouTube(url)
|
||||
# stream = yt.streams.get_audio_only()
|
||||
#
|
||||
# with tempfile.NamedTemporaryFile(delete=False) as temp_file:
|
||||
# stream.download(output_path=temp_file.name)
|
||||
# with open(temp_file.name, 'rb') as f:
|
||||
# file_data = f.read()
|
||||
#
|
||||
# minio_client.upload_document_file(tenant_id, document_version.doc_id, document_version.language,
|
||||
# document_version.id,
|
||||
# file_name, file_data)
|
||||
#
|
||||
# current_app.logger.info(f'Downloaded YouTube video: {url} for tenant: {tenant_id}')
|
||||
# return file_name, yt.title, yt.description, yt.author
|
||||
# except Exception as e:
|
||||
# current_app.logger.error(f'Error downloading YouTube video: {url} for tenant: {tenant_id} with error: {e}')
|
||||
# raise
|
||||
#
|
||||
#
|
||||
# def compress_audio(tenant_id, document_version, input_file, output_file):
|
||||
# try:
|
||||
# current_app.logger.info(f'Compressing audio for tenant: {tenant_id}')
|
||||
#
|
||||
# input_data = minio_client.download_document_file(tenant_id, document_version.doc_id, document_version.language,
|
||||
# document_version.id, input_file)
|
||||
#
|
||||
# with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_input:
|
||||
# temp_input.write(input_data)
|
||||
# temp_input.flush()
|
||||
#
|
||||
# with tempfile.NamedTemporaryFile(delete=False, suffix='.mp3') as temp_output:
|
||||
# result = subprocess.run(
|
||||
# ['ffmpeg', '-i', temp_input.name, '-b:a', '64k', '-f', 'mp3', temp_output.name],
|
||||
# capture_output=True,
|
||||
# text=True
|
||||
# )
|
||||
#
|
||||
# if result.returncode != 0:
|
||||
# raise Exception(f"Compression failed: {result.stderr}")
|
||||
#
|
||||
# with open(temp_output.name, 'rb') as f:
|
||||
# compressed_data = f.read()
|
||||
#
|
||||
# minio_client.upload_document_file(tenant_id, document_version.doc_id, document_version.language,
|
||||
# document_version.id,
|
||||
# output_file, compressed_data)
|
||||
#
|
||||
# current_app.logger.info(f'Compressed audio for tenant: {tenant_id}')
|
||||
# except Exception as e:
|
||||
# current_app.logger.error(f'Error compressing audio for tenant: {tenant_id} with error: {e}')
|
||||
# raise
|
||||
#
|
||||
#
|
||||
# def transcribe_audio(tenant_id, document_version, input_file, output_file, model_variables):
|
||||
# try:
|
||||
# current_app.logger.info(f'Transcribing audio for tenant: {tenant_id}')
|
||||
# client = model_variables['transcription_client']
|
||||
# model = model_variables['transcription_model']
|
||||
#
|
||||
# # Download the audio file from MinIO
|
||||
# audio_data = minio_client.download_document_file(tenant_id, document_version.doc_id, document_version.language,
|
||||
# document_version.id, input_file)
|
||||
#
|
||||
# # Load the audio data into pydub
|
||||
# audio = AudioSegment.from_mp3(io.BytesIO(audio_data))
|
||||
#
|
||||
# # Define segment length (e.g., 10 minutes)
|
||||
# segment_length = 10 * 60 * 1000 # 10 minutes in milliseconds
|
||||
#
|
||||
# transcriptions = []
|
||||
#
|
||||
# # Split audio into segments and transcribe each
|
||||
# for i, chunk in enumerate(audio[::segment_length]):
|
||||
# current_app.logger.debug(f'Transcribing chunk {i + 1} of {len(audio) // segment_length + 1}')
|
||||
#
|
||||
# with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_audio:
|
||||
# chunk.export(temp_audio.name, format="mp3")
|
||||
#
|
||||
# with open(temp_audio.name, 'rb') as audio_segment:
|
||||
# transcription = client.audio.transcriptions.create(
|
||||
# file=audio_segment,
|
||||
# model=model,
|
||||
# language=document_version.language,
|
||||
# response_format='verbose_json',
|
||||
# )
|
||||
#
|
||||
# transcriptions.append(transcription.text)
|
||||
#
|
||||
# os.unlink(temp_audio.name) # Delete the temporary file
|
||||
#
|
||||
# # Combine all transcriptions
|
||||
# full_transcription = " ".join(transcriptions)
|
||||
#
|
||||
# # Upload the full transcription to MinIO
|
||||
# minio_client.upload_document_file(
|
||||
# tenant_id,
|
||||
# document_version.doc_id,
|
||||
# document_version.language,
|
||||
# document_version.id,
|
||||
# output_file,
|
||||
# full_transcription.encode('utf-8')
|
||||
# )
|
||||
#
|
||||
# current_app.logger.info(f'Transcribed audio for tenant: {tenant_id}')
|
||||
# except Exception as e:
|
||||
# current_app.logger.error(f'Error transcribing audio for tenant: {tenant_id}, with error: {e}')
|
||||
# raise
|
||||
#
|
||||
#
|
||||
# def annotate_transcription(tenant, document_version, input_file, output_file, model_variables):
|
||||
# try:
|
||||
# current_app.logger.debug(f'Annotating transcription for tenant {tenant.id}')
|
||||
#
|
||||
# char_splitter = CharacterTextSplitter(separator='.',
|
||||
# chunk_size=model_variables['annotation_chunk_length'],
|
||||
# chunk_overlap=0)
|
||||
#
|
||||
# headers_to_split_on = [
|
||||
# ("#", "Header 1"),
|
||||
# ("##", "Header 2"),
|
||||
# ]
|
||||
# markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on, strip_headers=False)
|
||||
#
|
||||
# llm = model_variables['llm']
|
||||
# template = model_variables['transcript_template']
|
||||
# language_template = create_language_template(template, document_version.language)
|
||||
# transcript_prompt = ChatPromptTemplate.from_template(language_template)
|
||||
# setup = RunnablePassthrough()
|
||||
# output_parser = StrOutputParser()
|
||||
#
|
||||
# # Download the transcription file from MinIO
|
||||
# transcript_data = minio_client.download_document_file(tenant.id, document_version.doc_id,
|
||||
# document_version.language, document_version.id,
|
||||
# input_file)
|
||||
# transcript = transcript_data.decode('utf-8')
|
||||
#
|
||||
# chain = setup | transcript_prompt | llm | output_parser
|
||||
#
|
||||
# chunks = char_splitter.split_text(transcript)
|
||||
# all_markdown_chunks = []
|
||||
# last_markdown_chunk = ''
|
||||
# for chunk in chunks:
|
||||
# current_app.logger.debug(f'Annotating next chunk of {len(chunks)} for tenant {tenant.id}')
|
||||
# full_input = last_markdown_chunk + '\n' + chunk
|
||||
# if tenant.embed_tuning:
|
||||
# current_app.embed_tuning_logger.debug(f'Annotating chunk: \n '
|
||||
# f'------------------\n'
|
||||
# f'{full_input}\n'
|
||||
# f'------------------\n')
|
||||
# input_transcript = {'transcript': full_input}
|
||||
# markdown = chain.invoke(input_transcript)
|
||||
# # GPT-4o returns some kind of content description: ```markdown <text> ```
|
||||
# if markdown.startswith("```markdown"):
|
||||
# markdown = "\n".join(markdown.strip().split("\n")[1:-1])
|
||||
# if tenant.embed_tuning:
|
||||
# current_app.embed_tuning_logger.debug(f'Markdown Received: \n '
|
||||
# f'------------------\n'
|
||||
# f'{markdown}\n'
|
||||
# f'------------------\n')
|
||||
# md_header_splits = markdown_splitter.split_text(markdown)
|
||||
# markdown_chunks = [doc.page_content for doc in md_header_splits]
|
||||
# # claude-3.5-sonnet returns introductory text
|
||||
# if not markdown_chunks[0].startswith('#'):
|
||||
# markdown_chunks.pop(0)
|
||||
# last_markdown_chunk = markdown_chunks[-1]
|
||||
# last_markdown_chunk = "\n".join(markdown.strip().split("\n")[1:])
|
||||
# markdown_chunks.pop()
|
||||
# all_markdown_chunks += markdown_chunks
|
||||
#
|
||||
# all_markdown_chunks += [last_markdown_chunk]
|
||||
#
|
||||
# annotated_transcript = '\n'.join(all_markdown_chunks)
|
||||
#
|
||||
# # Upload the annotated transcript to MinIO
|
||||
# minio_client.upload_document_file(
|
||||
# tenant.id,
|
||||
# document_version.doc_id,
|
||||
# document_version.language,
|
||||
# document_version.id,
|
||||
# output_file,
|
||||
# annotated_transcript.encode('utf-8')
|
||||
# )
|
||||
#
|
||||
# current_app.logger.info(f'Annotated transcription for tenant {tenant.id}')
|
||||
# except Exception as e:
|
||||
# current_app.logger.error(f'Error annotating transcription for tenant {tenant.id}, with error: {e}')
|
||||
# raise
|
||||
|
||||
|
||||
def create_potential_chunks_for_markdown(tenant_id, document_version, input_file):
|
||||
try:
|
||||
current_app.logger.info(f'Creating potential chunks for tenant {tenant_id}')
|
||||
markdown_on = document_version.object_name.rsplit('.', 1)[0] + '.md'
|
||||
|
||||
# Download the markdown file from MinIO
|
||||
markdown_data = minio_client.download_document_file(tenant_id,
|
||||
document_version.doc_id,
|
||||
document_version.language,
|
||||
document_version.id,
|
||||
input_file
|
||||
document_version.bucket_name,
|
||||
markdown_on,
|
||||
)
|
||||
markdown = markdown_data.decode('utf-8')
|
||||
|
||||
|
||||
Binary file not shown.
83
integrations/Wordpress/eveai_sync/README.md
Normal file
83
integrations/Wordpress/eveai_sync/README.md
Normal file
@@ -0,0 +1,83 @@
|
||||
# EveAI Sync WordPress Plugin
|
||||
|
||||
## Description
|
||||
|
||||
EveAI Sync is a WordPress plugin that synchronizes your WordPress content (posts and pages) with the EveAI platform. It allows for seamless integration between your WordPress site and EveAI, ensuring that your content is always up-to-date on both platforms.
|
||||
|
||||
## Features
|
||||
|
||||
- Automatic synchronization of posts and pages with EveAI
|
||||
- Support for excluding specific categories or individual posts/pages from syncing
|
||||
- Bulk synchronization of existing content
|
||||
- Custom metadata synchronization
|
||||
- Easy-to-use admin interface for configuration
|
||||
|
||||
## Installation
|
||||
|
||||
1. Download the plugin zip file.
|
||||
2. Log in to your WordPress admin panel.
|
||||
3. Go to Plugins > Add New.
|
||||
4. Click on the "Upload Plugin" button.
|
||||
5. Select the downloaded zip file and click "Install Now".
|
||||
6. After installation, click "Activate Plugin".
|
||||
|
||||
## Configuration
|
||||
|
||||
1. Go to Settings > EveAI Sync in your WordPress admin panel.
|
||||
2. Enter your EveAI API URL, Tenant ID, and API Key.
|
||||
3. Configure any additional settings as needed.
|
||||
4. Click "Save Changes".
|
||||
|
||||
## Usage
|
||||
|
||||
- New posts and pages will automatically sync to EveAI when published.
|
||||
- Existing content can be synced using the "Bulk Sync" option in the settings.
|
||||
- To exclude a post or page from syncing, use the "Exclude from EveAI sync" checkbox in the post editor.
|
||||
- Categories can be excluded from syncing in the plugin settings.
|
||||
|
||||
## Action Scheduler Dependency
|
||||
|
||||
This plugin uses Action Scheduler for efficient background processing of synchronization tasks. Action Scheduler is typically included with WooCommerce, but the plugin can also function without it.
|
||||
|
||||
### With Action Scheduler
|
||||
|
||||
If Action Scheduler is available (either through WooCommerce or included with this plugin), EveAI Sync will use it for more reliable and efficient scheduling of synchronization tasks.
|
||||
|
||||
### Without Action Scheduler
|
||||
|
||||
If Action Scheduler is not available, the plugin will automatically fall back to using WordPress cron for scheduling tasks. This fallback ensures that the plugin remains functional, although with potentially less precise timing for background tasks.
|
||||
|
||||
No additional configuration is needed; the plugin will automatically detect the presence or absence of Action Scheduler and adjust its behavior accordingly.
|
||||
|
||||
## Versions
|
||||
|
||||
### 1.0.x - Bugfixing Releases
|
||||
|
||||
### 1.0.0 - Initial Release
|
||||
|
||||
|
||||
## Frequently Asked Questions
|
||||
|
||||
**Q: How often does the plugin sync content?**
|
||||
A: The plugin syncs content immediately when a post or page is published or updated. For bulk syncs or when Action Scheduler is not available, the timing may vary based on WordPress cron execution.
|
||||
|
||||
**Q: Can I sync only certain types of content?**
|
||||
A: By default, the plugin syncs all posts and pages. You can exclude specific categories or individual posts/pages from syncing.
|
||||
|
||||
**Q: What happens if the sync fails?**
|
||||
A: The plugin will log any sync failures and attempt to retry. You can view sync status in the plugin's admin interface.
|
||||
|
||||
**Q: Do I need to install Action Scheduler separately?**
|
||||
A: No, the plugin will work with or without Action Scheduler. If you have WooCommerce installed, Action Scheduler will be available automatically.
|
||||
|
||||
## Support
|
||||
|
||||
For support, please open an issue on our GitHub repository or contact our support team at support@eveai.com.
|
||||
|
||||
## Contributing
|
||||
|
||||
We welcome contributions to the EveAI Sync plugin. Please feel free to submit pull requests or open issues on our GitHub repository.
|
||||
|
||||
## License
|
||||
|
||||
This plugin is licensed under the GPL v2 or later.
|
||||
70
integrations/Wordpress/eveai_sync/admin/css/eveai-admin.css
Normal file
70
integrations/Wordpress/eveai_sync/admin/css/eveai-admin.css
Normal file
@@ -0,0 +1,70 @@
|
||||
.eveai-admin-wrap {
|
||||
max-width: 800px;
|
||||
margin: 20px auto;
|
||||
}
|
||||
|
||||
.eveai-admin-header {
|
||||
background-color: #fff;
|
||||
padding: 20px;
|
||||
border: 1px solid #ccc;
|
||||
border-radius: 5px;
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
|
||||
.eveai-admin-header h1 {
|
||||
margin: 0;
|
||||
color: #23282d;
|
||||
}
|
||||
|
||||
.eveai-admin-content {
|
||||
background-color: #fff;
|
||||
padding: 20px;
|
||||
border: 1px solid #ccc;
|
||||
border-radius: 5px;
|
||||
}
|
||||
|
||||
.eveai-form-group {
|
||||
margin-bottom: 15px;
|
||||
}
|
||||
|
||||
.eveai-form-group label {
|
||||
display: block;
|
||||
margin-bottom: 5px;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.eveai-form-group input[type="text"],
|
||||
.eveai-form-group input[type="password"] {
|
||||
width: 100%;
|
||||
padding: 8px;
|
||||
border: 1px solid #ddd;
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
.eveai-button {
|
||||
background-color: #0085ba;
|
||||
border-color: #0073aa #006799 #006799;
|
||||
color: #fff;
|
||||
text-decoration: none;
|
||||
text-shadow: 0 -1px 1px #006799, 1px 0 1px #006799, 0 1px 1px #006799, -1px 0 1px #006799;
|
||||
display: inline-block;
|
||||
padding: 8px 12px;
|
||||
border-radius: 3px;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.eveai-button:hover {
|
||||
background-color: #008ec2;
|
||||
}
|
||||
|
||||
.eveai-category-list {
|
||||
margin-top: 20px;
|
||||
}
|
||||
|
||||
.eveai-category-item {
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
|
||||
.eveai-category-item label {
|
||||
font-weight: normal;
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user