Compare commits

Comparing v2.3.1-alf...feature/Ad (192 commits)
The author and date columns of the commit list did not survive extraction; the 192 commit SHAs, oldest to newest as listed, are:

| SHA1 |
|---|
| 443104dfc7 |
| f2604db5a9 |
| 78043ab3ef |
| 95c8282eb8 |
| f2bd90e6ae |
| 04c9d8cf98 |
| e8bb66c2c2 |
| 03f6ef4408 |
| 5dd711bcd2 |
| ee13de7fde |
| 82ca6b537a |
| af37aa7253 |
| 1748aebd38 |
| d6041ebb27 |
| b3ee2f7ce9 |
| c523250ccb |
| a43825f5f0 |
| fb261ca0b9 |
| 3ca2e0a3a9 |
| 3aa2158a17 |
| 2bc5832db6 |
| 1720ddfa11 |
| 59febb7fbb |
| 4ec1099925 |
| 8d1a8d9645 |
| 1d79a19981 |
| aab766fe5e |
| 05241ecdea |
| 451f95fbc1 |
| 842429a659 |
| 225d494e15 |
| 5501061dd1 |
| eeb76d57b7 |
| 3ea3a06de6 |
| 37819cd7e5 |
| a798217091 |
| 83272a4e2a |
| b66e2e99ed |
| aeee22b305 |
| 5f387dcef8 |
| b499add891 |
| 2f815616b1 |
| f23214bb6d |
| 6df9aa9c7e |
| 5465dae52f |
| 79a3f94ac2 |
| 06586a1312 |
| 7b0e3cee7f |
| 7bef4e69df |
| a3e18cb4db |
| 471b8dd8c3 |
| 030d1b0e90 |
| fa452e4934 |
| e24e7265b9 |
| a76f87ba75 |
| c6fc8ca09a |
| 16ce59ae98 |
| cc47ce2d32 |
| b1e9fb71cb |
| a57662db3f |
| 66433f19b3 |
| e7397a6d0d |
| d097451d42 |
| 44e5dd5d02 |
| 3b23be0ea4 |
| 61ae9c3174 |
| b6512b2d8c |
| 0cd12a8491 |
| ae36791ffe |
| 53bfc6bb23 |
| 2afee41c2a |
| 79b1fef5b6 |
| 2b04692fab |
| 541d3862e6 |
| 43fd4ce9c1 |
| 14ba53e26b |
| 4ab8b2a714 |
| 42cb1de0fd |
| a325fa5084 |
| 7cb19ca21e |
| 6ccba7d1e3 |
| 6fbaff45a8 |
| 10ca344c84 |
| a9bbd1f466 |
| 804486664b |
| 36575c17a8 |
| 575bfa259e |
| 362b2fe753 |
| 5c20e6c1f9 |
| b812aedb81 |
| d6ea3ba46c |
| a6edd5c663 |
| 6115cc7e13 |
| 54a9641440 |
| af8b5f54cd |
| 2a0c92b064 |
| 898bb32318 |
| b0e1ad6e03 |
| 84afc0b2ee |
| 593dd438aa |
| 35f58f0c57 |
| 25ab9ccf23 |
| 2a4c9d7b00 |
| e6c3c24bd8 |
| 481157fb31 |
| 376ad328ca |
| 2bb9d4b0be |
| 6eae0ab1a3 |
| 4395d2e407 |
| da61f5f9ec |
| 53283b6687 |
| 5d715a958c |
| 0f969972d6 |
| 4c00d33bc3 |
| 9c63ecb17f |
| d6a2635e50 |
| 84a9334c80 |
| 066f579294 |
| ebf92b0474 |
| 7e35549262 |
| 866cc2a60d |
| ed87d73c5a |
| 212ea28de8 |
| cea38e02d2 |
| 248fae500a |
| 4d6466038f |
| 9a88582fff |
| 998ddf4c03 |
| dabf97c96e |
| 5e81595622 |
| ef138462d9 |
| 42ffe3795f |
| ba523a95c5 |
| 8a85b4540f |
| fc3cae1986 |
| 32df3d0589 |
| ccc1a2afb8 |
| f16ed85e82 |
| e990fe65d8 |
| 32cf105d7b |
| dc6cd9d940 |
| a0f806ba4e |
| 98db88b00b |
| 4ad621428e |
| 0f33beddf4 |
| f8f941d1e1 |
| abc0a50dcc |
| 854d889413 |
| 7bbc32e381 |
| e75c49d2fa |
| ccb844c15c |
| b60600e9f6 |
| 11b1d548bd |
| f3a243698c |
| 000636a229 |
| acad28b623 |
| 42635a583c |
| 7d7db296d3 |
| 51fd16bcc6 |
| 509ee95d81 |
| 33b5742d2f |
| 50773fe602 |
| 51d029d960 |
| fbc9f44ac8 |
| 4338f09f5c |
| 53e32a67bd |
| fda267b479 |
| f5c9542a49 |
| 043cea45f2 |
| 7b87880045 |
| 5b2c04501c |
| babcd6ec04 |
| 71adf64668 |
| dbea41451a |
| 82e25b356c |
| 3c7460f741 |
| 2835486599 |
| f1c60f9574 |
| b326c0c6f2 |
| 5f1a5711f6 |
| 67ceb57b79 |
| 23b49516cb |
| 9cc266b97f |
| 3f77871c4f |
| 199cf94cf2 |
| c4dcd6a0d3 |
| 43ee9139d6 |
| 8f45005713 |
| bc1626c4ff |
| 57c0e7a1ba |
| 0d05499d2b |
| b4e58659a8 |

.aiignore (new file, 19 lines)

@@ -0,0 +1,19 @@
# An .aiignore file follows the same syntax as a .gitignore file.
# .gitignore documentation: https://git-scm.com/docs/gitignore

# you can ignore files
.DS_Store
*.log
*.tmp

# or folders
dist/
build/
out/
nginx/node_modules/
nginx/static/
db_backups/
docker/eveai_logs/
docker/logs/
docker/minio/

.gitignore (vendored, 5 changed lines)

@@ -53,3 +53,8 @@ scripts/__pycache__/run_eveai_app.cpython-312.pyc
/docker/grafana/data/
/temp_requirements/
/nginx/node_modules/
/nginx/.parcel-cache/
/nginx/static/
/docker/build_logs/
/content/.Ulysses-Group.plist
/content/.Ulysses-Settings.plist

check_running_services.sh (new file, 32 lines)

@@ -0,0 +1,32 @@
#!/bin/bash
# Diagnostic script to check what services are running

echo "=== KIND CLUSTER STATUS ==="
echo "Namespaces:"
kubectl get namespaces | grep eveai

echo -e "\nPods in eveai-dev:"
kubectl get pods -n eveai-dev

echo -e "\nServices in eveai-dev:"
kubectl get services -n eveai-dev

echo -e "\n=== TEST CONTAINERS STATUS ==="
echo "Running test containers:"
podman ps | grep eveai_test

echo -e "\n=== PORT ANALYSIS ==="
echo "What's listening on port 3080:"
lsof -i :3080 2>/dev/null || echo "Nothing found"

echo -e "\nWhat's listening on port 4080:"
lsof -i :4080 2>/dev/null || echo "Nothing found"

echo -e "\n=== SOLUTION ==="
echo "The application you see is from TEST CONTAINERS (6 days old),"
echo "NOT from the Kind cluster (3 minutes old)."
echo ""
echo "To test Kind cluster:"
echo "1. Stop test containers: podman stop eveai_test_nginx_1 eveai_test_eveai_app_1"
echo "2. Deploy Kind services: kup-all-structured"
echo "3. Restart test containers if needed"

@@ -44,7 +44,6 @@ class TrackedMistralAIEmbeddings(EveAIEmbeddings):
        for i in range(0, len(texts), self.batch_size):
            batch = texts[i:i + self.batch_size]
            batch_num = i // self.batch_size + 1
            current_app.logger.debug(f"Processing embedding batch {batch_num}, size: {len(batch)}")

            start_time = time.time()
            try:
@@ -70,9 +69,6 @@ class TrackedMistralAIEmbeddings(EveAIEmbeddings):
                }
                current_event.log_llm_metrics(metrics)

                current_app.logger.debug(f"Batch {batch_num} processed: {len(batch)} texts, "
                                         f"{result.usage.total_tokens} tokens, {batch_time:.2f}s")

                # If processing multiple batches, add a small delay to avoid rate limits
                if len(texts) > self.batch_size and i + self.batch_size < len(texts):
                    time.sleep(0.25)  # 250ms pause between batches
@@ -82,7 +78,6 @@ class TrackedMistralAIEmbeddings(EveAIEmbeddings):
            # If a batch fails, try to process each text individually
            for j, text in enumerate(batch):
                try:
                    current_app.logger.debug(f"Attempting individual embedding for item {i + j}")
                    single_start_time = time.time()
                    single_result = self.client.embeddings.create(
                        model=self.model,

@@ -11,6 +11,7 @@ from flask_restx import Api
from prometheus_flask_exporter import PrometheusMetrics

from .utils.cache.eveai_cache_manager import EveAICacheManager
from .utils.content_utils import ContentManager
from .utils.simple_encryption import SimpleEncryption
from .utils.minio_utils import MinioClient

@@ -30,4 +31,5 @@ simple_encryption = SimpleEncryption()
minio_client = MinioClient()
metrics = PrometheusMetrics.for_app_factory()
cache_manager = EveAICacheManager()
content_manager = ContentManager()

@@ -3,7 +3,6 @@ from langchain.callbacks.base import BaseCallbackHandler
from typing import Dict, Any, List
from langchain.schema import LLMResult
from common.utils.business_event_context import current_event
from flask import current_app


class LLMMetricsHandler(BaseCallbackHandler):

common/langchain/persistent_llm_metrics_handler.py (new file, 47 lines)

@@ -0,0 +1,47 @@
import time
from langchain.callbacks.base import BaseCallbackHandler
from typing import Dict, Any, List
from langchain.schema import LLMResult
from common.utils.business_event_context import current_event


class PersistentLLMMetricsHandler(BaseCallbackHandler):
    """Metrics handler that allows metrics to be retrieved from within any call, in case metrics are required
    for purposes other than business event logging."""

    def __init__(self):
        self.total_tokens: int = 0
        self.prompt_tokens: int = 0
        self.completion_tokens: int = 0
        self.start_time: float = 0
        self.end_time: float = 0
        self.total_time: float = 0

    def reset(self):
        self.total_tokens = 0
        self.prompt_tokens = 0
        self.completion_tokens = 0
        self.start_time = 0
        self.end_time = 0
        self.total_time = 0

    def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
        self.start_time = time.time()

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        self.end_time = time.time()
        self.total_time = self.end_time - self.start_time

        usage = response.llm_output.get('token_usage', {})
        self.prompt_tokens += usage.get('prompt_tokens', 0)
        self.completion_tokens += usage.get('completion_tokens', 0)
        self.total_tokens = self.prompt_tokens + self.completion_tokens

    def get_metrics(self) -> Dict[str, int | float]:
        return {
            'total_tokens': self.total_tokens,
            'prompt_tokens': self.prompt_tokens,
            'completion_tokens': self.completion_tokens,
            'time_elapsed': self.total_time,
            'interaction_type': 'LLM',
        }
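
A minimal usage sketch for the new handler, assuming a LangChain chat model that reports `token_usage` in `llm_output` (the `ChatMistralAI` model name and the prompts below are illustrative, not taken from this diff):

```python
# Hypothetical wiring; only PersistentLLMMetricsHandler comes from this diff.
from langchain_mistralai import ChatMistralAI

from common.langchain.persistent_llm_metrics_handler import PersistentLLMMetricsHandler

handler = PersistentLLMMetricsHandler()
llm = ChatMistralAI(model="mistral-small-latest", callbacks=[handler])

llm.invoke("Summarise the consent flow in one sentence.")
llm.invoke("Now in Dutch.")

# Token counters accumulate across calls until reset() is called.
print(handler.get_metrics())  # e.g. {'total_tokens': ..., 'interaction_type': 'LLM', ...}
handler.reset()
```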

@@ -8,9 +8,10 @@ import sqlalchemy as sa

class Catalog(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), nullable=False)
    name = db.Column(db.String(50), nullable=False, unique=True)
    description = db.Column(db.Text, nullable=True)
    type = db.Column(db.String(50), nullable=False, default="STANDARD_CATALOG")
    type_version = db.Column(db.String(20), nullable=True, default="1.0.0")

    min_chunk_size = db.Column(db.Integer, nullable=True, default=1500)
    max_chunk_size = db.Column(db.Integer, nullable=True, default=2500)
@@ -26,6 +27,20 @@ class Catalog(db.Model):
    updated_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now(), onupdate=db.func.now())
    updated_by = db.Column(db.Integer, db.ForeignKey(User.id))

    def to_dict(self):
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'type': self.type,
            'type_version': self.type_version,
            'min_chunk_size': self.min_chunk_size,
            'max_chunk_size': self.max_chunk_size,
            'user_metadata': self.user_metadata,
            'system_metadata': self.system_metadata,
            'configuration': self.configuration,
        }


class Processor(db.Model):
    id = db.Column(db.Integer, primary_key=True)
@@ -34,6 +49,7 @@ class Processor(db.Model):
    catalog_id = db.Column(db.Integer, db.ForeignKey('catalog.id'), nullable=True)
    type = db.Column(db.String(50), nullable=False)
    sub_file_type = db.Column(db.String(50), nullable=True)
    active = db.Column(db.Boolean, nullable=True, default=True)

    # Tuning enablers
    tuning = db.Column(db.Boolean, nullable=True, default=False)
@@ -89,6 +105,12 @@ class Document(db.Model):
    # Relations
    versions = db.relationship('DocumentVersion', backref='document', lazy=True)

    @property
    def latest_version(self):
        """Returns the latest document version (the one with the highest id)"""
        from sqlalchemy import desc
        return DocumentVersion.query.filter_by(doc_id=self.id).order_by(desc(DocumentVersion.id)).first()

    def __repr__(self):
        return f"<Document {self.id}: {self.name}>"
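
The new `unique=True` on `Catalog.name` turns a duplicate name into a database-level error instead of a silent second row. A hedged sketch of that behaviour (the import path and session handling are assumptions, not shown in this diff):

```python
# Illustrative only: assumes the Flask-SQLAlchemy session and the Catalog model above.
from sqlalchemy.exc import IntegrityError

from common.extensions import db
from common.models.document import Catalog  # hypothetical import path

db.session.add(Catalog(name="manuals"))
db.session.commit()

db.session.add(Catalog(name="manuals"))  # same name again
try:
    db.session.commit()
except IntegrityError:
    db.session.rollback()  # the unique constraint on name rejects the duplicate
```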

@@ -1,7 +1,7 @@
from sqlalchemy.dialects.postgresql import JSONB

from ..extensions import db
from .user import User, Tenant
from .user import User, Tenant, TenantMake
from .document import Embedding, Retriever

@@ -29,6 +29,7 @@ class Specialist(db.Model):
    tuning = db.Column(db.Boolean, nullable=True, default=False)
    configuration = db.Column(JSONB, nullable=True)
    arguments = db.Column(JSONB, nullable=True)
    active = db.Column(db.Boolean, nullable=True, default=True)

    # Relationship to retrievers through the association table
    retrievers = db.relationship('SpecialistRetriever', backref='specialist', lazy=True,
@@ -44,6 +45,21 @@ class Specialist(db.Model):
    updated_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now(), onupdate=db.func.now())
    updated_by = db.Column(db.Integer, db.ForeignKey(User.id))

    def __repr__(self):
        return f"<Specialist {self.id}: {self.name}>"

    def to_dict(self):
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'type': self.type,
            'type_version': self.type_version,
            'configuration': self.configuration,
            'arguments': self.arguments,
            'active': self.active,
        }


class EveAIAsset(db.Model):
    id = db.Column(db.Integer, primary_key=True)
@@ -51,25 +67,23 @@ class EveAIAsset(db.Model):
    description = db.Column(db.Text, nullable=True)
    type = db.Column(db.String(50), nullable=False, default="DOCUMENT_TEMPLATE")
    type_version = db.Column(db.String(20), nullable=True, default="1.0.0")
    valid_from = db.Column(db.DateTime, nullable=True)
    valid_to = db.Column(db.DateTime, nullable=True)

    # Versioning Information
    created_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now())
    created_by = db.Column(db.Integer, db.ForeignKey(User.id), nullable=True)
    updated_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now(), onupdate=db.func.now())
    updated_by = db.Column(db.Integer, db.ForeignKey(User.id))

    # Relations
    versions = db.relationship('EveAIAssetVersion', backref='asset', lazy=True)


class EveAIAssetVersion(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    asset_id = db.Column(db.Integer, db.ForeignKey(EveAIAsset.id), nullable=False)
    # Storage information
    bucket_name = db.Column(db.String(255), nullable=True)
    object_name = db.Column(db.String(200), nullable=True)
    file_type = db.Column(db.String(20), nullable=True)
    file_size = db.Column(db.Float, nullable=True)

    # Metadata information
    user_metadata = db.Column(JSONB, nullable=True)
    system_metadata = db.Column(JSONB, nullable=True)

    # Configuration information
    configuration = db.Column(JSONB, nullable=True)
    arguments = db.Column(JSONB, nullable=True)

    # Cost information
    prompt_tokens = db.Column(db.Integer, nullable=True)
    completion_tokens = db.Column(db.Integer, nullable=True)

    # Versioning Information
    created_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now())
@@ -77,25 +91,25 @@ class EveAIAssetVersion(db.Model):
    updated_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now(), onupdate=db.func.now())
    updated_by = db.Column(db.Integer, db.ForeignKey(User.id))

    # Relations
    instructions = db.relationship('EveAIAssetInstruction', backref='asset_version', lazy=True)
    last_used_at = db.Column(db.DateTime, nullable=True)


class EveAIAssetInstruction(db.Model):
class EveAIDataCapsule(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    asset_version_id = db.Column(db.Integer, db.ForeignKey(EveAIAssetVersion.id), nullable=False)
    name = db.Column(db.String(255), nullable=False)
    content = db.Column(db.Text, nullable=True)
    chat_session_id = db.Column(db.Integer, db.ForeignKey(ChatSession.id), nullable=False)
    type = db.Column(db.String(50), nullable=False, default="STANDARD_RAG")
    type_version = db.Column(db.String(20), nullable=True, default="1.0.0")
    configuration = db.Column(JSONB, nullable=True)
    data = db.Column(JSONB, nullable=True)

    # Versioning Information
    created_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now())
    created_by = db.Column(db.Integer, db.ForeignKey(User.id), nullable=True)
    updated_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now(), onupdate=db.func.now())
    updated_by = db.Column(db.Integer, db.ForeignKey(User.id))

class EveAIProcessedAsset(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    asset_version_id = db.Column(db.Integer, db.ForeignKey(EveAIAssetVersion.id), nullable=False)
    specialist_id = db.Column(db.Integer, db.ForeignKey(Specialist.id), nullable=True)
    chat_session_id = db.Column(db.Integer, db.ForeignKey(ChatSession.id), nullable=True)
    bucket_name = db.Column(db.String(255), nullable=True)
    object_name = db.Column(db.String(255), nullable=True)
    created_at = db.Column(db.DateTime, nullable=True, server_default=db.func.now())
    # Unique constraint on chat_session_id, type and type_version
    __table_args__ = (db.UniqueConstraint('chat_session_id', 'type', 'type_version', name='uix_data_capsule_session_type_version'),)


class EveAIAgent(db.Model):
@@ -108,6 +122,8 @@ class EveAIAgent(db.Model):
    role = db.Column(db.Text, nullable=True)
    goal = db.Column(db.Text, nullable=True)
    backstory = db.Column(db.Text, nullable=True)
    temperature = db.Column(db.Float, nullable=True)
    llm_model = db.Column(db.String(50), nullable=True)
    tuning = db.Column(db.Boolean, nullable=True, default=False)
    configuration = db.Column(JSONB, nullable=True)
    arguments = db.Column(JSONB, nullable=True)
@@ -215,3 +231,36 @@ class SpecialistDispatcher(db.Model):
    dispatcher_id = db.Column(db.Integer, db.ForeignKey(Dispatcher.id, ondelete='CASCADE'), primary_key=True)

    dispatcher = db.relationship("Dispatcher", backref="specialist_dispatchers")


class SpecialistMagicLink(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), nullable=False)
    description = db.Column(db.Text, nullable=True)
    specialist_id = db.Column(db.Integer, db.ForeignKey(Specialist.id, ondelete='CASCADE'), nullable=False)
    tenant_make_id = db.Column(db.Integer, db.ForeignKey(TenantMake.id, ondelete='CASCADE'), nullable=True)
    magic_link_code = db.Column(db.String(55), nullable=False, unique=True)

    valid_from = db.Column(db.DateTime, nullable=True)
    valid_to = db.Column(db.DateTime, nullable=True)

    specialist_args = db.Column(JSONB, nullable=True)

    created_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now())
    created_by = db.Column(db.Integer, db.ForeignKey(User.id), nullable=True)
    updated_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now(), onupdate=db.func.now())
    updated_by = db.Column(db.Integer, db.ForeignKey(User.id))

    def __repr__(self):
        return f"<SpecialistMagicLink {self.specialist_id} {self.magic_link_code}>"

    def to_dict(self):
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'magic_link_code': self.magic_link_code,
            'valid_from': self.valid_from,
            'valid_to': self.valid_to,
            'specialist_args': self.specialist_args,
        }

@@ -1,8 +1,9 @@
from datetime import date
from enum import Enum

from common.extensions import db
from flask_security import UserMixin, RoleMixin
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.dialects.postgresql import ARRAY, JSONB
import sqlalchemy as sa

from common.models.entitlements import License
@@ -26,19 +27,18 @@ class Tenant(db.Model):
    timezone = db.Column(db.String(50), nullable=True, default='UTC')
    type = db.Column(db.String(20), nullable=True, server_default='Active')

    # language information
    default_language = db.Column(db.String(2), nullable=True)
    allowed_languages = db.Column(ARRAY(sa.String(2)), nullable=True)

    # Entitlements
    currency = db.Column(db.String(20), nullable=True)
    storage_dirty = db.Column(db.Boolean, nullable=True, default=False)
    default_tenant_make_id = db.Column(db.Integer, db.ForeignKey('public.tenant_make.id'), nullable=True)

    # Relations
    users = db.relationship('User', backref='tenant')
    domains = db.relationship('TenantDomain', backref='tenant')
    licenses = db.relationship('License', back_populates='tenant')
    license_usages = db.relationship('LicenseUsage', backref='tenant')
    tenant_makes = db.relationship('TenantMake', backref='tenant', foreign_keys='TenantMake.tenant_id')
    default_tenant_make = db.relationship('TenantMake', foreign_keys=[default_tenant_make_id], uselist=False)

    @property
    def current_license(self):
@@ -59,9 +59,8 @@ class Tenant(db.Model):
            'website': self.website,
            'timezone': self.timezone,
            'type': self.type,
            'default_language': self.default_language,
            'allowed_languages': self.allowed_languages,
            'currency': self.currency,
            'default_tenant_make_id': self.default_tenant_make_id,
        }


@@ -123,7 +122,6 @@ class User(db.Model, UserMixin):
    def has_roles(self, *args):
        return any(role.name in args for role in self.roles)


class TenantDomain(db.Model):
    __bind_key__ = 'public'
    __table_args__ = {'schema': 'public'}
@@ -173,6 +171,46 @@ class TenantProject(db.Model):
        return f"<TenantProject {self.id}: {self.name}>"


class TenantMake(db.Model):
    __bind_key__ = 'public'
    __table_args__ = {'schema': 'public'}

    id = db.Column(db.Integer, primary_key=True)
    tenant_id = db.Column(db.Integer, db.ForeignKey('public.tenant.id'), nullable=False)
    name = db.Column(db.String(50), nullable=False, unique=True)
    description = db.Column(db.Text, nullable=True)
    active = db.Column(db.Boolean, nullable=False, default=True)
    website = db.Column(db.String(255), nullable=True)
    logo_url = db.Column(db.String(255), nullable=True)
    default_language = db.Column(db.String(2), nullable=True)
    allowed_languages = db.Column(ARRAY(sa.String(2)), nullable=True)

    # Chat customisation options
    chat_customisation_options = db.Column(JSONB, nullable=True)

    # Versioning Information
    created_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now())
    created_by = db.Column(db.Integer, db.ForeignKey('public.user.id'), nullable=True)
    updated_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now(), onupdate=db.func.now())
    updated_by = db.Column(db.Integer, db.ForeignKey('public.user.id'))

    def __repr__(self):
        return f"<TenantMake {self.id} for tenant {self.tenant_id}: {self.name}>"

    def to_dict(self):
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'active': self.active,
            'website': self.website,
            'logo_url': self.logo_url,
            'chat_customisation_options': self.chat_customisation_options,
            'allowed_languages': self.allowed_languages,
            'default_language': self.default_language,
        }


class Partner(db.Model):
    __bind_key__ = 'public'
    __table_args__ = {'schema': 'public'}
@@ -271,3 +309,91 @@ class PartnerTenant(db.Model):
    created_by = db.Column(db.Integer, db.ForeignKey('public.user.id'), nullable=True)
    updated_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now(), onupdate=db.func.now())
    updated_by = db.Column(db.Integer, db.ForeignKey('public.user.id'), nullable=True)


class TenantConsent(db.Model):
    __bind_key__ = 'public'
    __table_args__ = {'schema': 'public'}
    id = db.Column(db.Integer, primary_key=True)
    tenant_id = db.Column(db.Integer, db.ForeignKey('public.tenant.id'), nullable=False)
    partner_id = db.Column(db.Integer, db.ForeignKey('public.partner.id'), nullable=True)
    partner_service_id = db.Column(db.Integer, db.ForeignKey('public.partner_service.id'), nullable=True)
    user_id = db.Column(db.Integer, db.ForeignKey('public.user.id'), nullable=False)
    consent_type = db.Column(db.String(50), nullable=False)
    consent_date = db.Column(db.DateTime, nullable=False, server_default=db.func.now())
    consent_version = db.Column(db.String(20), nullable=False, default="1.0.0")
    consent_data = db.Column(db.JSON, nullable=False)

    # Tracking
    created_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now())
    created_by = db.Column(db.Integer, db.ForeignKey('public.user.id'), nullable=True)
    updated_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now(), onupdate=db.func.now())
    updated_by = db.Column(db.Integer, db.ForeignKey('public.user.id'), nullable=True)


class ConsentVersion(db.Model):
    __bind_key__ = 'public'
    __table_args__ = {'schema': 'public'}
    id = db.Column(db.Integer, primary_key=True)
    consent_type = db.Column(db.String(50), nullable=False)
    consent_version = db.Column(db.String(20), nullable=False)
    consent_valid_from = db.Column(db.DateTime, nullable=False, server_default=db.func.now())
    consent_valid_to = db.Column(db.DateTime, nullable=True)

    # Tracking
    created_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now())
    created_by = db.Column(db.Integer, db.ForeignKey('public.user.id'), nullable=True)
    updated_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now(), onupdate=db.func.now())
    updated_by = db.Column(db.Integer, db.ForeignKey('public.user.id'), nullable=True)


class ConsentStatus(str, Enum):
    CONSENTED = 'CONSENTED'
    NOT_CONSENTED = 'NOT_CONSENTED'
    RENEWAL_REQUIRED = 'RENEWAL_REQUIRED'
    CONSENT_EXPIRED = 'CONSENT_EXPIRED'
    UNKNOWN_CONSENT_VERSION = 'UNKNOWN_CONSENT_VERSION'


class SpecialistMagicLinkTenant(db.Model):
    __bind_key__ = 'public'
    __table_args__ = {'schema': 'public'}

    magic_link_code = db.Column(db.String(55), primary_key=True)
    tenant_id = db.Column(db.Integer, db.ForeignKey('public.tenant.id'), nullable=False)


class TranslationCache(db.Model):
    __bind_key__ = 'public'
    __table_args__ = {'schema': 'public'}

    cache_key = db.Column(db.String(16), primary_key=True)
    source_text = db.Column(db.Text, nullable=False)
    translated_text = db.Column(db.Text, nullable=False)
    source_language = db.Column(db.String(2), nullable=True)
    target_language = db.Column(db.String(2), nullable=False)
    context = db.Column(db.Text, nullable=True)

    # Translation cost
    prompt_tokens = db.Column(db.Integer, nullable=False)
    completion_tokens = db.Column(db.Integer, nullable=False)

    # Tracking
    created_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now())
    created_by = db.Column(db.Integer, db.ForeignKey('public.user.id'), nullable=True)
    updated_at = db.Column(db.DateTime, nullable=False, server_default=db.func.now(), onupdate=db.func.now())
    updated_by = db.Column(db.Integer, db.ForeignKey('public.user.id'), nullable=True)

    last_used_at = db.Column(db.DateTime, nullable=True)


# class PartnerRAGRetriever(db.Model):
#     __bind_key__ = 'public'
#     __table_args__ = (
#         db.PrimaryKeyConstraint('tenant_id', 'retriever_id'),
#         db.UniqueConstraint('partner_id', 'tenant_id', 'retriever_id'),
#         {'schema': 'public'},
#     )
#
#     partner_id = db.Column(db.Integer, db.ForeignKey('public.partner.id'), nullable=False)
#     tenant_id = db.Column(db.Integer, db.ForeignKey('public.tenant.id'), nullable=False)
#     retriever_id = db.Column(db.Integer, nullable=False)

@@ -41,7 +41,7 @@ class LicensePeriodServices:
            current_app.logger.debug(f"Found license period {license_period.id} for tenant {tenant_id} "
                                     f"with status {license_period.status}")
            match license_period.status:
                case PeriodStatus.UPCOMING:
                case PeriodStatus.UPCOMING | PeriodStatus.PENDING:
                    current_app.logger.debug(f"In upcoming state")
                    LicensePeriodServices._complete_last_license_period(tenant_id=tenant_id)
                    current_app.logger.debug(f"Completed last license period for tenant {tenant_id}")
@@ -71,10 +71,10 @@ class LicensePeriodServices:
                    delta = abs(current_date - license_period.period_start)
                    if delta > timedelta(days=current_app.config.get('ENTITLEMENTS_MAX_PENDING_DAYS', 5)):
                        raise EveAIPendingLicensePeriod()
                    else:
                        return license_period
                case PeriodStatus.ACTIVE:
                    return license_period
                case PeriodStatus.PENDING:
                    return license_period
            else:
                raise EveAILicensePeriodsExceeded(license_id=None)
        except SQLAlchemyError as e:
@@ -125,7 +125,7 @@ class LicensePeriodServices:
                tenant_id=tenant_id,
                period_number=next_period_number,
                period_start=the_license.start_date + relativedelta(months=next_period_number-1),
                period_end=the_license.end_date + relativedelta(months=next_period_number, days=-1),
                period_end=the_license.start_date + relativedelta(months=next_period_number, days=-1),
                status=PeriodStatus.UPCOMING,
                upcoming_at=dt.now(tz.utc),
            )
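
The `period_end` fix derives both period boundaries from the license `start_date`: period N runs from `start_date + (N-1) months` through the day before `start_date + N months`. A small worked check of the corrected expression (the dates are invented):

```python
# Illustrative arithmetic only; mirrors the corrected period_end expression above.
from datetime import date
from dateutil.relativedelta import relativedelta

start_date = date(2025, 1, 15)   # hypothetical license start
next_period_number = 3

period_start = start_date + relativedelta(months=next_period_number - 1)
period_end = start_date + relativedelta(months=next_period_number, days=-1)

print(period_start)  # 2025-03-15
print(period_end)    # 2025-04-14, the day before period 4 would start
```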

@@ -6,7 +6,7 @@ from sqlalchemy.exc import SQLAlchemyError
from common.extensions import db
from common.models.entitlements import PartnerServiceLicenseTier
from common.models.user import Partner
from common.utils.eveai_exceptions import EveAINoManagementPartnerService
from common.utils.eveai_exceptions import EveAINoManagementPartnerService, EveAINoSessionPartner
from common.utils.model_logging_utils import set_logging_information

@@ -19,7 +19,7 @@ class LicenseTierServices:
        # Get partner service (MANAGEMENT_SERVICE type)
        partner = Partner.query.get(partner_id)
        if not partner:
            return
            raise EveAINoSessionPartner()

        # Find a management service for this partner
        management_service = next((service for service in session['partner']['services']

common/services/interaction/asset_services.py (new file, 9 lines)

@@ -0,0 +1,9 @@
from common.models.interaction import EveAIAsset
from common.extensions import minio_client


class AssetServices:

    @staticmethod
    def add_or_replace_asset_file(asset_id, file_data):
        asset = EveAIAsset.query.get_or_404(asset_id)

common/services/interaction/capsule_services.py (new file, 25 lines)

@@ -0,0 +1,25 @@
from datetime import datetime as dt, timezone as tz

from common.models.interaction import EveAIDataCapsule
from common.extensions import db
from common.utils.model_logging_utils import set_logging_information, update_logging_information


class CapsuleServices:
    @staticmethod
    def push_capsule_data(chat_session_id: str, type: str, type_version: str, configuration: dict, data: dict):
        capsule = EveAIDataCapsule.query.filter_by(chat_session_id=chat_session_id, type=type, type_version=type_version).first()
        if capsule:
            # Update the existing capsule if one already exists
            capsule.configuration = configuration
            capsule.data = data
            update_logging_information(capsule, dt.now(tz.utc))
        else:
            # Create a new capsule if none exists yet
            capsule = EveAIDataCapsule(chat_session_id=chat_session_id, type=type, type_version=type_version,
                                       configuration=configuration, data=data)
            set_logging_information(capsule, dt.now(tz.utc))
            db.session.add(capsule)

        db.session.commit()
        return capsule
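
`push_capsule_data` is effectively an upsert keyed on `(chat_session_id, type, type_version)`, the same key as the `uix_data_capsule_session_type_version` constraint above. A hedged usage sketch (all argument values are invented):

```python
# Hypothetical call from within a Flask app context; only the signature comes from the diff.
from common.services.interaction.capsule_services import CapsuleServices

capsule = CapsuleServices.push_capsule_data(
    chat_session_id=42,                  # invented session id
    type="STANDARD_RAG",
    type_version="1.0.0",
    configuration={"max_items": 5},      # invented configuration
    data={"answers": ["..."]},           # invented payload
)
# Calling it again with the same key updates the same row instead of inserting a second one.
```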

@@ -19,6 +19,7 @@ class SpecialistServices:

    @staticmethod
    def execute_specialist(tenant_id, specialist_id, specialist_arguments, session_id, user_timezone) -> Dict[str, Any]:
        current_app.logger.debug(f"Before sending task for {specialist_id} with arguments {specialist_arguments}")
        task = current_celery.send_task(
            'execute_specialist',
            args=[tenant_id,
@@ -29,6 +30,7 @@ class SpecialistServices:
                  ],
            queue='llm_interactions'
        )
        current_app.logger.debug(f"Task sent for {specialist_id}, task ID: {task.id}")

        return {
            'task_id': task.id,
@@ -220,3 +222,18 @@ class SpecialistServices:
        db.session.add(tool)
        current_app.logger.info(f"Created tool {tool.id} of type {tool_type}")
        return tool

    @staticmethod
    def get_specialist_system_field(specialist_id, config_name, system_name):
        """Get the value of a system field in a specialist's configuration. Returns the actual value, or None."""
        specialist = Specialist.query.get(specialist_id)
        if not specialist:
            raise ValueError(f"Specialist with ID {specialist_id} not found")
        config = cache_manager.specialists_config_cache.get_config(specialist.type, specialist.type_version)
        if not config:
            raise ValueError(f"No configuration found for {specialist.type} version {specialist.version}")
        potential_field = config.get(config_name, None)
        if potential_field:
            if potential_field.type == 'system' and potential_field.system_name == system_name:
                return specialist.configuration.get(config_name, None)
        return None
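
A hedged sketch of how the new `get_specialist_system_field` lookup might be called; the id, config name, system name, and import path are invented for illustration:

```python
# Hypothetical import path and argument values; only the call shape comes from the diff.
from common.services.interaction.specialist_services import SpecialistServices

value = SpecialistServices.get_specialist_system_field(
    specialist_id=7,                   # invented id
    config_name="language",            # invented configuration field
    system_name="default_language",    # invented system name
)
if value is None:
    # Either the field is absent, or it is not a system field with that system_name.
    ...
```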

@@ -1,5 +1,6 @@
from common.services.user.user_services import UserServices
from common.services.user.partner_services import PartnerServices
from common.services.user.tenant_services import TenantServices
from common.services.user.consent_services import ConsentServices

__all__ = ['UserServices', 'PartnerServices', 'TenantServices']
__all__ = ['UserServices', 'PartnerServices', 'TenantServices', 'ConsentServices']

common/services/user/consent_services.py (new file, 254 lines)

@@ -0,0 +1,254 @@
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime as dt, timezone as tz
from typing import List, Optional, Tuple, Dict

from flask import current_app, request, session
from flask_security import current_user
from sqlalchemy import desc
from sqlalchemy.exc import SQLAlchemyError, IntegrityError

from common.extensions import db
from common.models.user import TenantConsent, ConsentVersion, ConsentStatus, PartnerService, PartnerTenant, Tenant


@dataclass
class TypeStatus:
    consent_type: str
    status: ConsentStatus
    active_version: Optional[str]
    last_version: Optional[str]


class ConsentServices:
    @staticmethod
    def get_required_consent_types() -> List[str]:
        return list(current_app.config.get("CONSENT_TYPES", []))

    @staticmethod
    def get_active_consent_version(consent_type: str) -> Optional[ConsentVersion]:
        try:
            # Active version: the one with consent_valid_to IS NULL, latest for this type
            return (ConsentVersion.query
                    .filter_by(consent_type=consent_type, consent_valid_to=None)
                    .order_by(desc(ConsentVersion.consent_valid_from))
                    .first())
        except SQLAlchemyError as e:
            current_app.logger.error(f"DB error in get_active_consent_version({consent_type}): {e}")
            return None

    @staticmethod
    def get_tenant_last_consent(tenant_id: int, consent_type: str) -> Optional[TenantConsent]:
        try:
            return (TenantConsent.query
                    .filter_by(tenant_id=tenant_id, consent_type=consent_type)
                    .order_by(desc(TenantConsent.id))
                    .first())
        except SQLAlchemyError as e:
            current_app.logger.error(f"DB error in get_tenant_last_consent({tenant_id}, {consent_type}): {e}")
            return None

    @staticmethod
    def evaluate_type_status(tenant_id: int, consent_type: str) -> TypeStatus:
        active = ConsentServices.get_active_consent_version(consent_type)
        if not active:
            current_app.logger.error(f"No active ConsentVersion found for type {consent_type}")
            return TypeStatus(consent_type, ConsentStatus.UNKNOWN_CONSENT_VERSION, None, None)

        last = ConsentServices.get_tenant_last_consent(tenant_id, consent_type)
        if not last:
            return TypeStatus(consent_type, ConsentStatus.NOT_CONSENTED, active.consent_version, None)

        # If last consent equals active → CONSENTED
        if last.consent_version == active.consent_version:
            return TypeStatus(consent_type, ConsentStatus.CONSENTED, active.consent_version, last.consent_version)

        # Else: last refers to an older version; check its ConsentVersion to see grace period
        prev_cv = ConsentVersion.query.filter_by(consent_type=consent_type,
                                                 consent_version=last.consent_version).first()
        if not prev_cv:
            current_app.logger.error(f"Tenant {tenant_id} references unknown ConsentVersion {last.consent_version} for {consent_type}")
            return TypeStatus(consent_type, ConsentStatus.UNKNOWN_CONSENT_VERSION, active.consent_version, last.consent_version)

        if prev_cv.consent_valid_to:
            now = dt.now(tz.utc)
            if prev_cv.consent_valid_to >= now:
                # Within transition window
                return TypeStatus(consent_type, ConsentStatus.RENEWAL_REQUIRED, active.consent_version, last.consent_version)
            else:
                return TypeStatus(consent_type, ConsentStatus.NOT_CONSENTED, active.consent_version, last.consent_version)
        else:
            # Should not happen if a newer active exists; treat as unknown config
            current_app.logger.error(f"Previous ConsentVersion without valid_to while a newer active exists for {consent_type}")
            return TypeStatus(consent_type, ConsentStatus.UNKNOWN_CONSENT_VERSION, active.consent_version, last.consent_version)

    @staticmethod
    def aggregate_status(type_statuses: List[TypeStatus]) -> ConsentStatus:
        # Priority: UNKNOWN > NOT_CONSENTED > RENEWAL_REQUIRED > CONSENTED
        priorities = {
            ConsentStatus.UNKNOWN_CONSENT_VERSION: 4,
            ConsentStatus.NOT_CONSENTED: 3,
            ConsentStatus.RENEWAL_REQUIRED: 2,
            ConsentStatus.CONSENTED: 1,
        }
        if not type_statuses:
            return ConsentStatus.CONSENTED
        worst = max(type_statuses, key=lambda ts: priorities.get(ts.status, 0))
        return worst.status

    @staticmethod
    def get_consent_status(tenant_id: int) -> ConsentStatus:
        statuses = [ConsentServices.evaluate_type_status(tenant_id, ct) for ct in ConsentServices.get_required_consent_types()]
        return ConsentServices.aggregate_status(statuses)

    @staticmethod
    def _is_tenant_admin_for(tenant_id: int) -> bool:
        try:
            return current_user.is_authenticated and current_user.has_roles('Tenant Admin') and getattr(current_user, 'tenant_id', None) == tenant_id
        except Exception:
            return False

    @staticmethod
    def _is_management_partner_for(tenant_id: int) -> Tuple[bool, Optional[int], Optional[int]]:
        """Return (allowed, partner_id, partner_service_id) for management partner context."""
        try:
            if not (current_user.is_authenticated and current_user.has_roles('Partner Admin')):
                return False, None, None
            # Check PartnerTenant relationship via MANAGEMENT_SERVICE
            ps = PartnerService.query.filter_by(type='MANAGEMENT_SERVICE').all()
            if not ps:
                return False, None, None
            ps_ids = [p.id for p in ps]
            pt = PartnerTenant.query.filter_by(tenant_id=tenant_id).filter(PartnerTenant.partner_service_id.in_(ps_ids)).first()
            if not pt:
                return False, None, None
            the_ps = PartnerService.query.get(pt.partner_service_id)
            return True, the_ps.partner_id if the_ps else None, the_ps.id if the_ps else None
        except Exception as e:
            current_app.logger.error(f"Error in _is_management_partner_for: {e}")
            return False, None, None

    @staticmethod
    def can_consent_on_behalf(tenant_id: int) -> Tuple[bool, str, Optional[int], Optional[int]]:
        # Returns: allowed, mode('tenant_admin'|'management_partner'), partner_id, partner_service_id
        if ConsentServices._is_tenant_admin_for(tenant_id):
            return True, 'tenant_admin', None, None
        allowed, partner_id, partner_service_id = ConsentServices._is_management_partner_for(tenant_id)
        if allowed:
            return True, 'management_partner', partner_id, partner_service_id
        return False, 'none', None, None

    @staticmethod
    def _resolve_consent_content(consent_type: str, version: str) -> Dict:
        """Resolve canonical file ref and hash for a consent document.

        Uses configurable base dir, type subpaths, and patch-dir strategy.
        Defaults:
          - base: 'content'
          - map: {'Data Privacy Agreement': 'dpa', 'Terms & Conditions': 'terms'}
          - strategy: 'major_minor' -> a.b.c => a.b/a.b.c.md
          - ext: '.md'
        """
        import hashlib
        from pathlib import Path

        cfg = current_app.config if current_app else {}
        base_dir = cfg.get('CONSENT_CONTENT_BASE_DIR', 'content')
        type_paths = cfg.get('CONSENT_TYPE_PATHS', {
            'Data Privacy Agreement': 'dpa',
            'Terms & Conditions': 'terms',
        })
        strategy = cfg.get('CONSENT_PATCH_DIR_STRATEGY', 'major_minor')
        ext = cfg.get('CONSENT_MARKDOWN_EXT', '.md')

        type_dir = type_paths.get(consent_type, consent_type.lower().replace(' ', '_'))
        subpath = ''
        filename = f"{version}{ext}"
        try:
            parts = version.split('.')
            if strategy == 'major_minor' and len(parts) >= 2:
                subpath = f"{parts[0]}.{parts[1]}"
                filename = f"{parts[0]}.{parts[1]}.{parts[2] if len(parts) > 2 else '0'}{ext}"
            # Build canonical path
            if subpath:
                canonical_ref = f"{base_dir}/{type_dir}/{subpath}/{filename}"
            else:
                canonical_ref = f"{base_dir}/{type_dir}/{filename}"
        except Exception:
            canonical_ref = f"{base_dir}/{type_dir}/{version}{ext}"

        # Read file and hash
        content_hash = ''
        try:
            # project root = parent of app package
            root = Path(current_app.root_path).parent if current_app else Path('.')
            fpath = root / canonical_ref
            content_bytes = fpath.read_bytes() if fpath.exists() else b''
            content_hash = hashlib.sha256(content_bytes).hexdigest() if content_bytes else ''
        except Exception:
            content_hash = ''

        return {
            'canonical_document_ref': canonical_ref,
            'content_hash': content_hash,
        }

    @staticmethod
    def record_consent(tenant_id: int, consent_type: str) -> TenantConsent:
        # Validate type
        if consent_type not in ConsentServices.get_required_consent_types():
            raise ValueError(f"Unknown consent type: {consent_type}")
        active = ConsentServices.get_active_consent_version(consent_type)
        if not active:
            raise RuntimeError(f"No active ConsentVersion for type {consent_type}")

        allowed, mode, partner_id, partner_service_id = ConsentServices.can_consent_on_behalf(tenant_id)
        if not allowed:
            raise PermissionError("Not authorized to record consent for this tenant")

        # Idempotency: if already consented for active version, return existing
        existing = (TenantConsent.query
                    .filter_by(tenant_id=tenant_id, consent_type=consent_type, consent_version=active.consent_version)
                    .first())
        if existing:
            return existing

        # Build consent_data with audit info
        ip = request.headers.get('X-Forwarded-For', '').split(',')[0].strip() or request.remote_addr or ''
        ua = request.headers.get('User-Agent', '')
        locale = session.get('locale') or request.accept_languages.best or ''
        content_meta = ConsentServices._resolve_consent_content(consent_type, active.consent_version)
        consent_data = {
            'source_ip': ip,
            'user_agent': ua,
            'locale': locale,
            **content_meta,
        }

        tc = TenantConsent(
            tenant_id=tenant_id,
            partner_id=partner_id,
            partner_service_id=partner_service_id,
            user_id=getattr(current_user, 'id', None) or 0,
            consent_type=consent_type,
            consent_version=active.consent_version,
            consent_data=consent_data,
        )
        try:
            db.session.add(tc)
            db.session.commit()
            current_app.logger.info(f"Consent recorded: tenant={tenant_id}, type={consent_type}, version={active.consent_version}, mode={mode}, user={getattr(current_user, 'id', None)}")
            return tc
        except IntegrityError as e:
            db.session.rollback()
            # In case of race, fetch existing
            current_app.logger.warning(f"IntegrityError on consent insert, falling back: {e}")
            existing = (TenantConsent.query
                        .filter_by(tenant_id=tenant_id, consent_type=consent_type, consent_version=active.consent_version)
                        .first())
            if existing:
                return existing
            raise
        except SQLAlchemyError as e:
            db.session.rollback()
            current_app.logger.error(f"DB error in record_consent: {e}")
            raise
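
The `_resolve_consent_content` path strategy is easiest to see on a concrete version string: with the defaults above, version `2.1.3` of the Data Privacy Agreement resolves to `content/dpa/2.1/2.1.3.md`. A standalone re-derivation of just that path logic (it mirrors the defaults and needs no Flask context):

```python
# Standalone sketch of the default 'major_minor' strategy used above.
def consent_path(consent_type: str, version: str,
                 base_dir: str = "content", ext: str = ".md") -> str:
    type_paths = {"Data Privacy Agreement": "dpa", "Terms & Conditions": "terms"}
    type_dir = type_paths.get(consent_type, consent_type.lower().replace(" ", "_"))
    parts = version.split(".")
    if len(parts) >= 2:
        # a.b.c is stored under the a.b directory; a.b alone becomes a.b.0
        subpath = f"{parts[0]}.{parts[1]}"
        filename = f"{parts[0]}.{parts[1]}.{parts[2] if len(parts) > 2 else '0'}{ext}"
        return f"{base_dir}/{type_dir}/{subpath}/{filename}"
    return f"{base_dir}/{type_dir}/{version}{ext}"

assert consent_path("Data Privacy Agreement", "2.1.3") == "content/dpa/2.1/2.1.3.md"
assert consent_path("Terms & Conditions", "1.0") == "content/terms/1.0/1.0.0.md"
```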

@@ -1,4 +1,4 @@
from typing import List
from typing import List, Dict, Any

from flask import session
from sqlalchemy.exc import SQLAlchemyError
@@ -6,7 +6,6 @@ from sqlalchemy.exc import SQLAlchemyError
from common.models.entitlements import PartnerServiceLicenseTier
from common.utils.eveai_exceptions import EveAINoManagementPartnerService, EveAINoSessionPartner

from common.utils.security_utils import current_user_has_role


class PartnerServices:
@@ -43,5 +42,11 @@ class PartnerServices:

        return license_tier_ids

    @staticmethod
    def get_management_service() -> Dict[str, Any]:
        management_service = next((service for service in session['partner']['services']
                                   if service.get('type') == 'MANAGEMENT_SERVICE'), None)
        return management_service
@@ -1,15 +1,16 @@
|
||||
from typing import Dict, List
|
||||
|
||||
from flask import session, current_app
|
||||
from sqlalchemy import desc
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
|
||||
from common.extensions import db, cache_manager
|
||||
from common.models.user import Partner, PartnerTenant, PartnerService, Tenant
|
||||
from common.models.user import Partner, PartnerTenant, PartnerService, Tenant, TenantConsent, ConsentStatus, \
|
||||
ConsentVersion
|
||||
from common.utils.eveai_exceptions import EveAINoManagementPartnerService
|
||||
from common.utils.model_logging_utils import set_logging_information
|
||||
from datetime import datetime as dt, timezone as tz
|
||||
|
||||
from common.utils.security_utils import current_user_has_role
|
||||
|
||||
|
||||
class TenantServices:
|
||||
@@ -28,7 +29,7 @@ class TenantServices:
|
||||
if service.get('type') == 'MANAGEMENT_SERVICE'), None)
|
||||
|
||||
if not management_service:
|
||||
current_app.logger.error(f"No Management Service defined for partner {partner_id}"
|
||||
current_app.logger.error(f"No Management Service defined for partner {partner_id} "
|
||||
f"while associating tenant {tenant_id} with partner.")
|
||||
raise EveAINoManagementPartnerService()
|
||||
|
||||
@@ -47,101 +48,101 @@ class TenantServices:
|
||||
current_app.logger.error(f"Error associating tenant {tenant_id} with partner: {str(e)}")
|
||||
raise e
|
||||
|
||||
@staticmethod
|
||||
def get_available_types_for_tenant(tenant_id: int, config_type: str) -> Dict[str, Dict[str, str]]:
|
||||
"""
|
||||
Get available configuration types for a tenant based on partner relationships
|
||||
@staticmethod
|
||||
def get_available_types_for_tenant(tenant_id: int, config_type: str) -> Dict[str, Dict[str, str]]:
|
||||
"""
|
||||
Get available configuration types for a tenant based on partner relationships
|
||||
|
||||
Args:
|
||||
tenant_id: The tenant ID
|
||||
config_type: The configuration type ('specialists', 'agents', 'tasks', etc.)
|
||||
Args:
|
||||
tenant_id: The tenant ID
|
||||
config_type: The configuration type ('specialists', 'agents', 'tasks', etc.)
|
||||
|
||||
Returns:
|
||||
Dictionary of available types for the tenant
|
||||
"""
|
||||
# Get the appropriate cache handler based on config_type
|
||||
cache_handler = None
|
||||
if config_type == 'specialists':
|
||||
cache_handler = cache_manager.specialists_types_cache
|
||||
elif config_type == 'agents':
|
||||
cache_handler = cache_manager.agents_types_cache
|
||||
elif config_type == 'tasks':
|
||||
cache_handler = cache_manager.tasks_types_cache
|
||||
elif config_type == 'tools':
|
||||
cache_handler = cache_manager.tools_types_cache
|
||||
else:
|
||||
raise ValueError(f"Unsupported config type: {config_type}")
|
||||
Returns:
|
||||
Dictionary of available types for the tenant
|
||||
"""
|
||||
# Get the appropriate cache handler based on config_type
|
||||
cache_handler = None
|
||||
if config_type == 'specialists':
|
||||
cache_handler = cache_manager.specialists_types_cache
|
||||
elif config_type == 'agents':
|
||||
cache_handler = cache_manager.agents_types_cache
|
||||
elif config_type == 'tasks':
|
||||
cache_handler = cache_manager.tasks_types_cache
|
||||
elif config_type == 'tools':
|
||||
cache_handler = cache_manager.tools_types_cache
|
||||
elif config_type == 'catalogs':
|
||||
cache_handler = cache_manager.catalogs_types_cache
|
||||
elif config_type == 'retrievers':
|
||||
cache_handler = cache_manager.retrievers_types_cache
|
||||
else:
|
||||
raise ValueError(f"Unsupported config type: {config_type}")
|
||||
|
||||
# Get all types with their metadata (including partner info)
|
||||
all_types = cache_handler.get_types()
|
||||
# Get all types with their metadata (including partner info)
|
||||
all_types = cache_handler.get_types()
|
||||
|
||||
# Filter to include:
|
||||
# 1. Types with no partner (global)
|
||||
# 2. Types with partners that have a SPECIALIST_SERVICE relationship with this tenant
|
||||
available_partners = TenantServices.get_tenant_partner_names(tenant_id)
|
||||
# Filter to include:
|
||||
# 1. Types with no partner (global)
|
||||
# 2. Types with partners that have a SPECIALIST_SERVICE relationship with this tenant
|
||||
available_partners = TenantServices.get_tenant_partner_specialist_denominators(tenant_id)
|
||||
|
||||
available_types = {
|
||||
type_id: info for type_id, info in all_types.items()
|
||||
if info.get('partner') is None or info.get('partner') in available_partners
|
||||
}
|
||||
available_types = {
|
||||
type_id: info for type_id, info in all_types.items()
|
||||
if info.get('partner') is None or info.get('partner') in available_partners
|
||||
}
|
||||
|
||||
return available_types
|
||||
        return available_types

    @staticmethod
    def get_tenant_partner_specialist_denominators(tenant_id: int) -> List[str]:
        """
        Get the specialist denominators of partners that have a SPECIALIST_SERVICE relationship with this tenant,
        that can be used for filtering configurations.

        Args:
            tenant_id: The tenant ID

        Returns:
            List of partner specialist denominators
        """
        # Find all PartnerTenant relationships for this tenant
        partner_service_denominators = []
        try:
            # Get all partner services of type SPECIALIST_SERVICE
            specialist_services = (
                PartnerService.query
                .filter_by(type='SPECIALIST_SERVICE')
                .all()
            )

            if not specialist_services:
                return []

            # Find tenant relationships with these services
            partner_tenants = (
                PartnerTenant.query
                .filter_by(tenant_id=tenant_id)
                .filter(PartnerTenant.partner_service_id.in_([svc.id for svc in specialist_services]))
                .all()
            )

            # Collect the specialist denominator of each matching partner service
            for pt in partner_tenants:
                partner_service = (
                    PartnerService.query
                    .filter_by(id=pt.partner_service_id)
                    .first()
                )

                if partner_service:
                    partner_service_denominators.append(
                        partner_service.configuration.get("specialist_denominator", ""))

        except SQLAlchemyError as e:
            current_app.logger.error(f"Database error retrieving partner names: {str(e)}")

        return partner_service_denominators

    @staticmethod
    def can_use_specialist_type(tenant_id: int, specialist_type: str) -> bool:
        """
        Check if a tenant can use a specific specialist type

@@ -166,10 +167,16 @@ class TenantServices:

            # If it's a partner-specific specialist, check if tenant has access
            partner_name = specialist_def.get('partner')
            available_partners = TenantServices.get_tenant_partner_specialist_denominators(tenant_id)

            return partner_name in available_partners

        except Exception as e:
            current_app.logger.error(f"Error checking specialist type access: {str(e)}")
            return False

    @staticmethod
    def get_consent_status(tenant_id: int) -> ConsentStatus:
        # Delegate to the centralised ConsentServices to ensure consistent logic
        from common.services.user.consent_services import ConsentServices
        return ConsentServices.get_consent_status(tenant_id)
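A minimal usage sketch of the access check (requires a Flask app context and seeded PartnerService/PartnerTenant rows; the tenant id and specialist type are illustrative):

# Illustrative sketch, not part of the diff; 'partner_intake_specialist' is a made-up type id.
if TenantServices.can_use_specialist_type(tenant_id=42, specialist_type='partner_intake_specialist'):
    denominators = TenantServices.get_tenant_partner_specialist_denominators(42)
    current_app.logger.info(f"Tenant 42 may use this specialist; denominators: {denominators}")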
108
common/services/utils/human_answer_services.py
Normal file
@@ -0,0 +1,108 @@
from flask import current_app, session
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

from common.utils.business_event import BusinessEvent
from common.utils.business_event_context import current_event
from common.utils.model_utils import get_template
from eveai_chat_workers.outputs.globals.a2q_output.q_a_output_v1_0 import A2QOutput
from eveai_chat_workers.outputs.globals.q_a_output.q_a_output_v1_0 import QAOutput


class HumanAnswerServices:
    @staticmethod
    def check_affirmative_answer(tenant_id: int, question: str, answer: str, language_iso: str) -> bool:
        return HumanAnswerServices._check_answer(tenant_id, question, answer, language_iso,
                                                 "check_affirmative_answer", "Check Affirmative Answer")

    @staticmethod
    def check_additional_information(tenant_id: int, question: str, answer: str, language_iso: str) -> bool:
        result = HumanAnswerServices._check_answer(tenant_id, question, answer, language_iso,
                                                   "check_additional_information", "Check Additional Information")

        return result

    @staticmethod
    def get_answer_to_question(tenant_id: int, question: str, answer: str, language_iso: str) -> str:
        language = HumanAnswerServices._process_arguments(question, answer, language_iso)
        span_name = "Get Answer To Question"
        template_name = "get_answer_to_question"

        if not current_event:
            with BusinessEvent('Answer Check Service', tenant_id):
                with current_event.create_span(span_name):
                    return HumanAnswerServices._get_answer_to_question_logic(question, answer, language,
                                                                             template_name)
        else:
            with current_event.create_span(span_name):
                return HumanAnswerServices._get_answer_to_question_logic(question, answer, language, template_name)

    @staticmethod
    def _check_answer(tenant_id: int, question: str, answer: str, language_iso: str, template_name: str,
                      span_name: str) -> bool:
        language = HumanAnswerServices._process_arguments(question, answer, language_iso)
        if not current_event:
            with BusinessEvent('Answer Check Service', tenant_id):
                with current_event.create_span(span_name):
                    return HumanAnswerServices._check_answer_logic(question, answer, language, template_name)
        else:
            with current_event.create_span(span_name):
                return HumanAnswerServices._check_answer_logic(question, answer, language, template_name)

    @staticmethod
    def _check_answer_logic(question: str, answer: str, language: str, template_name: str) -> bool:
        prompt_params = {
            'question': question,
            'answer': answer,
            'language': language,
        }

        template, llm = get_template(template_name)
        check_answer_prompt = ChatPromptTemplate.from_template(template)
        setup = RunnablePassthrough()

        output_schema = QAOutput
        structured_llm = llm.with_structured_output(output_schema)

        chain = (setup | check_answer_prompt | structured_llm)

        raw_answer = chain.invoke(prompt_params)

        return raw_answer.answer

    @staticmethod
    def _get_answer_to_question_logic(question: str, answer: str, language: str, template_name: str) -> str:
        prompt_params = {
            'question': question,
            'answer': answer,
            'language': language,
        }

        template, llm = get_template(template_name)
        check_answer_prompt = ChatPromptTemplate.from_template(template)
        setup = RunnablePassthrough()

        output_schema = A2QOutput
        structured_llm = llm.with_structured_output(output_schema)

        chain = (setup | check_answer_prompt | structured_llm)

        raw_answer = chain.invoke(prompt_params)

        return raw_answer.answer

    @staticmethod
    def _process_arguments(question, answer, language_iso: str) -> str:
        if language_iso.strip() == '':
            raise ValueError("Language cannot be empty")
        language = current_app.config.get('SUPPORTED_LANGUAGE_ISO639_1_LOOKUP').get(language_iso)
        if language is None:
            raise ValueError(f"Unsupported language: {language_iso}")
        if question.strip() == '':
            raise ValueError("Question cannot be empty")
        if answer.strip() == '':
            raise ValueError("Answer cannot be empty")

        return language
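A minimal call sketch (assumes an app context with SUPPORTED_LANGUAGE_ISO639_1_LOOKUP configured and the prompt templates registered; the values are illustrative):

# Illustrative sketch, not part of the diff.
is_yes = HumanAnswerServices.check_affirmative_answer(
    tenant_id=1,
    question="Do you agree to the terms?",
    answer="Ja, dat is goed.",
    language_iso="nl",
)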
203
common/services/utils/translation_services.py
Normal file
@@ -0,0 +1,203 @@
import json
import copy
import re
from typing import Dict, Any, Optional

from flask import session

from common.extensions import cache_manager
from common.utils.business_event import BusinessEvent
from common.utils.business_event_context import current_event


class TranslationServices:

    @staticmethod
    def translate_config(tenant_id: int, config_data: Dict[str, Any], field_config: str, target_language: str,
                         source_language: Optional[str] = None, context: Optional[str] = None) -> Dict[str, Any]:
        """
        Translates a configuration based on a field configuration.

        Args:
            tenant_id: ID of the tenant we are translating for.
            config_data: A dictionary or JSON string (then converted to a dictionary) with configuration data
            field_config: The name of a field configuration (e.g. 'fields')
            target_language: The language to translate to
            source_language: Optional, the source language of the configuration
            context: Optional, a specific context for the translation

        Returns:
            A dictionary with the translated configuration
        """
        config_type = config_data.get('type', 'Unknown')
        config_version = config_data.get('version', 'Unknown')
        span_name = f"{config_type}-{config_version}-{field_config}"

        if current_event:
            with current_event.create_span(span_name):
                translated_config = TranslationServices._translate_config(tenant_id, config_data, field_config,
                                                                          target_language, source_language, context)
                return translated_config
        else:
            with BusinessEvent('Config Translation Service', tenant_id):
                with current_event.create_span(span_name):
                    translated_config = TranslationServices._translate_config(tenant_id, config_data, field_config,
                                                                              target_language, source_language,
                                                                              context)
                    return translated_config

    @staticmethod
    def _translate_config(tenant_id: int, config_data: Dict[str, Any], field_config: str, target_language: str,
                          source_language: Optional[str] = None, context: Optional[str] = None) -> Dict[str, Any]:

        # Make sure we are working with a dictionary
        if isinstance(config_data, str):
            config_data = json.loads(config_data)

        # Deep-copy the original data so we can modify it without mutating the input
        translated_config = copy.deepcopy(config_data)

        # Fetch type and version for the Business Event span
        config_type = config_data.get('type', 'Unknown')
        config_version = config_data.get('version', 'Unknown')

        if field_config in config_data:
            fields = config_data[field_config]

            # Use the description from the metadata as context when no explicit context is given
            description_context = ""
            if not context and 'metadata' in config_data and 'description' in config_data['metadata']:
                description_context = config_data['metadata']['description']

            # Helper functions
            def is_nonempty_str(val: Any) -> bool:
                return isinstance(val, str) and val.strip() != ''

            def safe_translate(text: str, ctx: Optional[str]):
                try:
                    res = cache_manager.translation_cache.get_translation(
                        text=text,
                        target_lang=target_language,
                        source_lang=source_language,
                        context=ctx
                    )
                    return res.translated_text if res else None
                except Exception as e:
                    if current_event:
                        current_event.log_error('translation_error', {
                            'tenant_id': tenant_id,
                            'config_type': config_type,
                            'config_version': config_version,
                            'field_config': field_config,
                            'error': str(e)
                        })
                    return None

            tag_pair_pattern = re.compile(r'<([a-zA-Z][\w-]*)>[\s\S]*?<\/\1>')

            def extract_tag_counts(text: str) -> Dict[str, int]:
                counts: Dict[str, int] = {}
                for m in tag_pair_pattern.finditer(text or ''):
                    tag = m.group(1)
                    counts[tag] = counts.get(tag, 0) + 1
                return counts

            def tags_valid(source: str, translated: str) -> bool:
                return extract_tag_counts(source) == extract_tag_counts(translated)

            # Counters
            meta_consentRich_translated_count = 0
            meta_aria_translated_count = 0
            meta_inline_tags_invalid_after_translation_count = 0

            # Loop over every field in the configuration
            for field_name, field_data in fields.items():
                # Translate name if it exists and is not empty (strings only)
                if 'name' in field_data and is_nonempty_str(field_data['name']):
                    field_context = context if context else description_context
                    t = safe_translate(field_data['name'], field_context)
                    if t:
                        translated_config[field_config][field_name]['name'] = t

                if 'title' in field_data and is_nonempty_str(field_data.get('title')):
                    field_context = context if context else description_context
                    t = safe_translate(field_data['title'], field_context)
                    if t:
                        translated_config[field_config][field_name]['title'] = t

                # Translate description if it exists and is not empty
                if 'description' in field_data and is_nonempty_str(field_data.get('description')):
                    field_context = context if context else description_context
                    t = safe_translate(field_data['description'], field_context)
                    if t:
                        translated_config[field_config][field_name]['description'] = t

                # Translate context if it exists and is not empty
                if 'context' in field_data and is_nonempty_str(field_data.get('context')):
                    t = safe_translate(field_data['context'], context)
                    if t:
                        translated_config[field_config][field_name]['context'] = t

                # Translate allowed_values if the field exists and the values are not empty (string items only)
                if 'allowed_values' in field_data and isinstance(field_data['allowed_values'], list) and field_data['allowed_values']:
                    translated_allowed_values = []
                    for allowed_value in field_data['allowed_values']:
                        if is_nonempty_str(allowed_value):
                            t = safe_translate(allowed_value, context)
                            translated_allowed_values.append(t if t else allowed_value)
                        else:
                            translated_allowed_values.append(allowed_value)
                    if translated_allowed_values:
                        translated_config[field_config][field_name]['allowed_values'] = translated_allowed_values

                # Translate meta.consentRich and meta.aria*
                meta = field_data.get('meta')
                if isinstance(meta, dict):
                    # consentRich
                    if is_nonempty_str(meta.get('consentRich')):
                        consent_ctx = (context if context else description_context) or ''
                        consent_ctx = f"Consent rich text with inline tags. Keep tag names intact and translate only inner text. {consent_ctx}".strip()
                        t = safe_translate(meta['consentRich'], consent_ctx)
                        if t and tags_valid(meta['consentRich'], t):
                            translated_config[field_config][field_name].setdefault('meta', {})['consentRich'] = t
                            meta_consentRich_translated_count += 1
                        else:
                            if t and not tags_valid(meta['consentRich'], t) and current_event:
                                src_counts = extract_tag_counts(meta['consentRich'])
                                dst_counts = extract_tag_counts(t)
                                current_event.log_error('inline_tags_validation_failed', {
                                    'tenant_id': tenant_id,
                                    'config_type': config_type,
                                    'config_version': config_version,
                                    'field_config': field_config,
                                    'field_name': field_name,
                                    'target_language': target_language,
                                    'source_tag_counts': src_counts,
                                    'translated_tag_counts': dst_counts
                                })
                                meta_inline_tags_invalid_after_translation_count += 1
                            # fallback: keep original (already in deep copy)
                    # aria*
                    for k, v in list(meta.items()):
                        if isinstance(k, str) and k.startswith('aria') and is_nonempty_str(v):
                            aria_ctx = (context if context else description_context) or ''
                            aria_ctx = f"ARIA label for accessibility. Short, imperative, descriptive. Form '{config_type} {config_version}', field '{field_name}'. {aria_ctx}".strip()
                            t2 = safe_translate(v, aria_ctx)
                            if t2:
                                translated_config[field_config][field_name].setdefault('meta', {})[k] = t2
                                meta_aria_translated_count += 1

        return translated_config

    @staticmethod
    def translate(tenant_id: int, text: str, target_language: str, source_language: Optional[str] = None,
                  context: Optional[str] = None) -> str:
        if current_event:
            with current_event.create_span('Translation'):
                translation_cache = cache_manager.translation_cache.get_translation(text, target_language,
                                                                                    source_language, context)
                return translation_cache.translated_text
        else:
            with BusinessEvent('Translation Service', tenant_id):
                with current_event.create_span('Translation'):
                    translation_cache = cache_manager.translation_cache.get_translation(text, target_language,
                                                                                        source_language, context)
                    return translation_cache.translated_text
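The inline-tag guard above only accepts a translation when every <tag>...</tag> pair survives with the same counts. A self-contained sketch of that check:

# Illustrative sketch, not part of the diff.
import re

tag_pair_pattern = re.compile(r'<([a-zA-Z][\w-]*)>[\s\S]*?<\/\1>')

def extract_tag_counts(text):
    counts = {}
    for m in tag_pair_pattern.finditer(text or ''):
        counts[m.group(1)] = counts.get(m.group(1), 0) + 1
    return counts

src = "I accept the <terms>terms</terms> and <privacy>privacy policy</privacy>."
dst = "Ik aanvaard de <terms>voorwaarden</terms> en <privacy>privacyverklaring</privacy>."
assert extract_tag_counts(src) == extract_tag_counts(dst)  # translation keeps both tag pairs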
14
common/services/utils/version_services.py
Normal file
@@ -0,0 +1,14 @@
from flask import current_app


class VersionServices:
    @staticmethod
    def split_version(full_version: str) -> tuple[str, str]:
        parts = full_version.split(".")
        if len(parts) < 3:
            major_minor = '.'.join(parts[:2]) if len(parts) >= 2 else full_version
            patch = ''
        else:
            major_minor = '.'.join(parts[:2])
            patch = parts[2]

        return major_minor, patch
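Expected behaviour of split_version, as a quick sketch:

# Illustrative sketch, not part of the diff.
assert VersionServices.split_version("2.3.1") == ("2.3", "1")
assert VersionServices.split_version("2.3") == ("2.3", "")   # no patch component
assert VersionServices.split_version("2") == ("2", "")       # falls back to the full string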
22
common/templates/error/401.html
Normal file
@@ -0,0 +1,22 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Unauthorized</title>
    <style>
        body { font-family: system-ui, -apple-system, Segoe UI, Roboto, Helvetica, Arial, sans-serif; background:#f7f7f9; color:#222; }
        .wrap { max-width: 720px; margin: 10vh auto; background:#fff; border:1px solid #e5e7eb; border-radius:12px; padding:32px; box-shadow: 0 8px 24px rgba(0,0,0,0.06); }
        h1 { margin: 0 0 8px; font-size: 28px; }
        p { margin: 0 0 16px; line-height:1.6; }
        a.btn { display:inline-block; padding:10px 16px; background:#2c3e50; color:#fff; text-decoration:none; border-radius:8px; }
    </style>
</head>
<body>
<main class="wrap">
    <h1>Not authorized</h1>
    <p>Your session may have expired or this action is not permitted.</p>
    <p><a class="btn" href="/">Go to home</a></p>
</main>
</body>
</html>
22
common/templates/error/403.html
Normal file
@@ -0,0 +1,22 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Forbidden</title>
    <style>
        body { font-family: system-ui, -apple-system, Segoe UI, Roboto, Helvetica, Arial, sans-serif; background:#f7f7f9; color:#222; }
        .wrap { max-width: 720px; margin: 10vh auto; background:#fff; border:1px solid #e5e7eb; border-radius:12px; padding:32px; box-shadow: 0 8px 24px rgba(0,0,0,0.06); }
        h1 { margin: 0 0 8px; font-size: 28px; }
        p { margin: 0 0 16px; line-height:1.6; }
        a.btn { display:inline-block; padding:10px 16px; background:#2c3e50; color:#fff; text-decoration:none; border-radius:8px; }
    </style>
</head>
<body>
<main class="wrap">
    <h1>Access forbidden</h1>
    <p>You don't have permission to access this resource.</p>
    <p><a class="btn" href="/">Go to home</a></p>
</main>
</body>
</html>
22
common/templates/error/404.html
Normal file
@@ -0,0 +1,22 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Page not found</title>
    <style>
        body { font-family: system-ui, -apple-system, Segoe UI, Roboto, Helvetica, Arial, sans-serif; background:#f7f7f9; color:#222; }
        .wrap { max-width: 720px; margin: 10vh auto; background:#fff; border:1px solid #e5e7eb; border-radius:12px; padding:32px; box-shadow: 0 8px 24px rgba(0,0,0,0.06); }
        h1 { margin: 0 0 8px; font-size: 28px; }
        p { margin: 0 0 16px; line-height:1.6; }
        a.btn { display:inline-block; padding:10px 16px; background:#2c3e50; color:#fff; text-decoration:none; border-radius:8px; }
    </style>
</head>
<body>
<main class="wrap">
    <h1>Page not found</h1>
    <p>The page you are looking for doesn’t exist or has been moved.</p>
    <p><a class="btn" href="/">Go to home</a></p>
</main>
</body>
</html>
22
common/templates/error/500.html
Normal file
@@ -0,0 +1,22 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Something went wrong</title>
    <style>
        body { font-family: system-ui, -apple-system, Segoe UI, Roboto, Helvetica, Arial, sans-serif; background:#f7f7f9; color:#222; }
        .wrap { max-width: 720px; margin: 10vh auto; background:#fff; border:1px solid #e5e7eb; border-radius:12px; padding:32px; box-shadow: 0 8px 24px rgba(0,0,0,0.06); }
        h1 { margin: 0 0 8px; font-size: 28px; }
        p { margin: 0 0 16px; line-height:1.6; }
        a.btn { display:inline-block; padding:10px 16px; background:#2c3e50; color:#fff; text-decoration:none; border-radius:8px; }
    </style>
</head>
<body>
<main class="wrap">
    <h1>We’re sorry — something went wrong</h1>
    <p>Please try again later. If the issue persists, contact support.</p>
    <p><a class="btn" href="/">Go to home</a></p>
</main>
</body>
</html>
22
common/templates/error/generic.html
Normal file
@@ -0,0 +1,22 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Error</title>
    <style>
        body { font-family: system-ui, -apple-system, Segoe UI, Roboto, Helvetica, Arial, sans-serif; background:#f7f7f9; color:#222; }
        .wrap { max-width: 720px; margin: 10vh auto; background:#fff; border:1px solid #e5e7eb; border-radius:12px; padding:32px; box-shadow: 0 8px 24px rgba(0,0,0,0.06); }
        h1 { margin: 0 0 8px; font-size: 28px; }
        p { margin: 0 0 16px; line-height:1.6; }
        a.btn { display:inline-block; padding:10px 16px; background:#2c3e50; color:#fff; text-decoration:none; border-radius:8px; }
    </style>
</head>
<body>
<main class="wrap">
    <h1>Oops! Something went wrong</h1>
    <p>Please try again. If the issue persists, contact support.</p>
    <p><a class="btn" href="/">Go to home</a></p>
</main>
</body>
</html>
45
common/utils/asset_manifest.py
Normal file
@@ -0,0 +1,45 @@
import json
import os
from functools import lru_cache
from typing import Dict

# Default manifest path inside app images; override with env
DEFAULT_MANIFEST_PATH = os.environ.get(
    'EVEAI_STATIC_MANIFEST_PATH',
    '/app/config/static-manifest/manifest.json'
)


@lru_cache(maxsize=1)
def _load_manifest(manifest_path: str = DEFAULT_MANIFEST_PATH) -> Dict[str, str]:
    try:
        with open(manifest_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except Exception:
        return {}


def resolve_asset(logical_path: str, manifest_path: str = DEFAULT_MANIFEST_PATH) -> str:
    """
    Map a logical asset path (e.g. 'dist/chat-client.js') to the hashed path
    found in the Parcel manifest. If not found or manifest missing, return the
    original logical path for graceful fallback.
    """
    if not logical_path:
        return logical_path

    manifest = _load_manifest(manifest_path)

    # Try several key variants as Parcel manifests may use different keys
    candidates = [
        logical_path,
        logical_path.lstrip('/'),
        logical_path.replace('static/', ''),
        logical_path.replace('dist/', ''),
    ]

    for key in candidates:
        if key in manifest:
            return manifest[key]

    return logical_path
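A usage sketch, e.g. from a Jinja context processor (the manifest content here is an assumption):

# Illustrative sketch, not part of the diff.
# Given a manifest {"dist/chat-client.js": "dist/chat-client.3f9a1c.js"}:
hashed = resolve_asset('dist/chat-client.js')   # -> 'dist/chat-client.3f9a1c.js'
missing = resolve_asset('dist/unknown.css')     # -> 'dist/unknown.css' (graceful fallback)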
@@ -4,59 +4,9 @@ from flask import current_app
from sqlalchemy.exc import SQLAlchemyError

from common.extensions import cache_manager, minio_client, db
from common.models.interaction import EveAIAsset, EveAIAssetVersion
from common.utils.model_logging_utils import set_logging_information


def create_asset_stack(api_input, tenant_id):
    type_version = cache_manager.assets_version_tree_cache.get_latest_version(api_input['type'])
    api_input['type_version'] = type_version
    new_asset = create_asset(api_input, tenant_id)
    new_asset_version = create_version_for_asset(new_asset, tenant_id)
    db.session.add(new_asset)
    db.session.add(new_asset_version)

    try:
        db.session.commit()
    except SQLAlchemyError as e:
        current_app.logger.error(f"Could not add asset for tenant {tenant_id}: {str(e)}")
        db.session.rollback()
        raise e

    return new_asset, new_asset_version


def create_asset(api_input, tenant_id):
    new_asset = EveAIAsset()
    new_asset.name = api_input['name']
    new_asset.description = api_input['description']
    new_asset.type = api_input['type']
    new_asset.type_version = api_input['type_version']
    if api_input['valid_from'] and api_input['valid_from'] != '':
        new_asset.valid_from = api_input['valid_from']
    else:
        new_asset.valid_from = dt.now(tz.utc)
    new_asset.valid_to = api_input['valid_to']
    set_logging_information(new_asset, dt.now(tz.utc))

    return new_asset


def create_version_for_asset(asset, tenant_id):
    new_asset_version = EveAIAssetVersion()
    new_asset_version.asset = asset
    new_asset_version.bucket_name = minio_client.create_tenant_bucket(tenant_id)
    set_logging_information(new_asset_version, dt.now(tz.utc))

    return new_asset_version


def add_asset_version_file(asset_version, field_name, file, tenant_id):
    object_name, file_size = minio_client.upload_file(asset_version.bucket_name, asset_version.id, field_name,
                                                      file.content_type)
    # mark_tenant_storage_dirty(tenant_id)
    # TODO - make sure the storage recalculation happens immediately!
    return object_name
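A call sketch for the asset stack (requires an app context with the db and minio extensions initialised; the api_input keys follow the fields read above, values are illustrative):

# Illustrative sketch, not part of the diff.
api_input = {
    'name': 'Welcome PDF',
    'description': 'Onboarding document',
    'type': 'document',
    'valid_from': '',     # empty string falls back to now(UTC)
    'valid_to': None,
}
asset, asset_version = create_asset_stack(api_input, tenant_id=7)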
@@ -559,12 +559,24 @@ class BusinessEvent:
        self._log_buffer = []

    def _push_to_gateway(self):
        # Push metrics to the gateway with a grouping key to avoid overwrites across pods/processes
        try:
            # Determine grouping labels
            pod_name = current_app.config.get('POD_NAME', current_app.config.get('COMPONENT_NAME', 'dev'))
            pod_namespace = current_app.config.get('POD_NAMESPACE', current_app.config.get('FLASK_ENV', 'dev'))
            worker_id = str(os.getpid())

            grouping_key = {
                'instance': pod_name,
                'namespace': pod_namespace,
                'process': worker_id,
            }

            push_to_gateway(
                current_app.config['PUSH_GATEWAY_URL'],
                job=current_app.config['COMPONENT_NAME'],
                registry=REGISTRY,
                grouping_key=grouping_key,
            )
        except Exception as e:
            current_app.logger.error(f"Failed to push metrics to Prometheus Push Gateway: {e}")
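Without a grouping key, every pod pushing the same job name overwrites the previous push; with one, each (job, instance, namespace, process) combination becomes its own group on the gateway. A standalone sketch with prometheus_client (gateway address and labels are assumptions):

# Illustrative sketch, not part of the diff.
from prometheus_client import CollectorRegistry, Counter, push_to_gateway

registry = CollectorRegistry()
events = Counter('business_events_total', 'Business events', registry=registry)
events.inc()
push_to_gateway(
    'localhost:9091',
    job='eveai_api',
    registry=registry,
    grouping_key={'instance': 'pod-a', 'namespace': 'dev', 'process': '1234'},
)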
4
common/utils/cache/base.py
vendored
@@ -121,7 +121,7 @@ class CacheHandler(Generic[T]):
        region_name = getattr(self.region, 'name', 'default_region')

        key = CacheKey({k: identifiers[k] for k in self._key_components})
        return f"{region_name}:{self.prefix}:{str(key)}"

    def get(self, creator_func, **identifiers) -> T:
        """
@@ -179,7 +179,7 @@ class CacheHandler(Generic[T]):
        Deletes all keys that start with the region prefix.
        """
        # Construct the pattern for all keys in this region
        pattern = f"{self.region}:{self.prefix}:*"

        # Assuming Redis backend with dogpile, use `delete_multi` or direct Redis access
        if hasattr(self.region.backend, 'client'):
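The key builder and the region-wide delete pattern must produce the same shape, which is why both now join with ':'. A quick sketch of the resulting format (names are illustrative):

# Illustrative sketch, not part of the diff.
region_name, prefix, key = 'eveai_config', 'agent_config', '7_1.0'
cache_key = f"{region_name}:{prefix}:{key}"   # 'eveai_config:agent_config:7_1.0'
pattern = f"{region_name}:{prefix}:*"         # matches the key above during invalidation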
56
common/utils/cache/config_cache.py
vendored
@@ -7,7 +7,7 @@ from flask import current_app

from common.utils.cache.base import CacheHandler, CacheKey
from config.type_defs import agent_types, task_types, tool_types, specialist_types, retriever_types, prompt_types, \
    catalog_types, partner_service_types, processor_types, customisation_types, specialist_form_types, capsule_types


def is_major_minor(version: str) -> bool:
@@ -332,24 +332,22 @@ class BaseConfigTypesCacheHandler(CacheHandler[Dict[str, Any]]):
        """
        return isinstance(value, dict)  # Cache all dictionaries

    def _load_type_definitions(self) -> Dict[str, Dict[str, Any]]:
        """Load type definitions from the corresponding type_defs module"""
        if not self._types_module:
            raise ValueError("_types_module must be set by subclass")

        type_definitions = {}
        for type_id, info in self._types_module.items():
            # Copy every field from the type definition
            type_definitions[type_id] = {}
            for key, value in info.items():
                type_definitions[type_id][key] = value

        return type_definitions

    def get_types(self) -> Dict[str, Dict[str, Any]]:
        """Get dictionary of available types with all defined properties"""
        result = self.get(
            lambda type_name: self._load_type_definitions(),
            type_name=f'{self.config_type}_types',
@@ -463,7 +461,6 @@ ProcessorConfigCacheHandler, ProcessorConfigVersionTreeCacheHandler, ProcessorCo
    types_module=processor_types.PROCESSOR_TYPES
))

PartnerServiceConfigCacheHandler, PartnerServiceConfigVersionTreeCacheHandler, PartnerServiceConfigTypesCacheHandler = (
    create_config_cache_handlers(
        config_type='partner_services',
@@ -471,6 +468,31 @@ PartnerServiceConfigCacheHandler, PartnerServiceConfigVersionTreeCacheHandler, P
        types_module=partner_service_types.PARTNER_SERVICE_TYPES
    ))

CustomisationConfigCacheHandler, CustomisationConfigVersionTreeCacheHandler, CustomisationConfigTypesCacheHandler = (
    create_config_cache_handlers(
        config_type='customisations',
        config_dir='config/customisations',
        types_module=customisation_types.CUSTOMISATION_TYPES
    )
)

SpecialistFormConfigCacheHandler, SpecialistFormConfigVersionTreeCacheHandler, SpecialistFormConfigTypesCacheHandler = (
    create_config_cache_handlers(
        config_type='specialist_forms',
        config_dir='config/specialist_forms',
        types_module=specialist_form_types.SPECIALIST_FORM_TYPES
    )
)


CapsuleConfigCacheHandler, CapsuleConfigVersionTreeCacheHandler, CapsuleConfigTypesCacheHandler = (
    create_config_cache_handlers(
        config_type='data_capsules',
        config_dir='config/data_capsules',
        types_module=capsule_types.CAPSULE_TYPES
    )
)


def register_config_cache_handlers(cache_manager) -> None:
    cache_manager.register_handler(AgentConfigCacheHandler, 'eveai_config')
@@ -503,6 +525,12 @@ def register_config_cache_handlers(cache_manager) -> None:
    cache_manager.register_handler(PartnerServiceConfigCacheHandler, 'eveai_config')
    cache_manager.register_handler(PartnerServiceConfigTypesCacheHandler, 'eveai_config')
    cache_manager.register_handler(PartnerServiceConfigVersionTreeCacheHandler, 'eveai_config')
    cache_manager.register_handler(CustomisationConfigCacheHandler, 'eveai_config')
    cache_manager.register_handler(CustomisationConfigTypesCacheHandler, 'eveai_config')
    cache_manager.register_handler(CustomisationConfigVersionTreeCacheHandler, 'eveai_config')
    cache_manager.register_handler(SpecialistFormConfigCacheHandler, 'eveai_config')
    cache_manager.register_handler(SpecialistFormConfigTypesCacheHandler, 'eveai_config')
    cache_manager.register_handler(SpecialistFormConfigVersionTreeCacheHandler, 'eveai_config')

    cache_manager.agents_config_cache.set_version_tree_cache(cache_manager.agents_version_tree_cache)
    cache_manager.tasks_config_cache.set_version_tree_cache(cache_manager.tasks_version_tree_cache)
@@ -513,3 +541,5 @@ def register_config_cache_handlers(cache_manager) -> None:
    cache_manager.catalogs_config_cache.set_version_tree_cache(cache_manager.catalogs_version_tree_cache)
    cache_manager.processors_config_cache.set_version_tree_cache(cache_manager.processors_version_tree_cache)
    cache_manager.partner_services_config_cache.set_version_tree_cache(cache_manager.partner_services_version_tree_cache)
    cache_manager.customisations_config_cache.set_version_tree_cache(cache_manager.customisations_version_tree_cache)
    cache_manager.specialist_forms_config_cache.set_version_tree_cache(cache_manager.specialist_forms_version_tree_cache)
56
common/utils/cache/regions.py
vendored
@@ -1,48 +1,64 @@
# common/utils/cache/regions.py
import time

import redis
from dogpile.cache import make_region
from urllib.parse import urlparse
import os

import ssl


def get_redis_config(app):
    """
    Create Redis configuration dict based on app config.
    Handles both authenticated and non-authenticated setups.
    """
    app.logger.debug(f"Creating Redis config")
    # redis_uri = urlparse(app.config['REDIS_BASE_URI'])

    config = {
        'host': app.config['REDIS_URL'],
        'port': app.config['REDIS_PORT'],
        'db': 4,  # Keep this for later use
        'max_connections': 20,
        'retry_on_timeout': True,
        'socket_keepalive': True,
        'socket_keepalive_options': {},
    }

    # Add authentication if provided
    un = app.config.get('REDIS_USER')
    pw = app.config.get('REDIS_PASS')
    if un and pw:
        config.update({
            'username': un,
            'password': pw
        })

    # SSL support using centralised config
    cert_path = app.config.get('REDIS_CA_CERT_PATH')
    redis_scheme = app.config.get('REDIS_SCHEME')
    if cert_path and redis_scheme == 'rediss':
        config.update({
            'connection_class': redis.SSLConnection,
            'ssl_cert_reqs': ssl.CERT_REQUIRED,
            'ssl_check_hostname': app.config.get('REDIS_SSL_CHECK_HOSTNAME', True),
            'ssl_ca_certs': cert_path,
        })

    app.logger.debug(f"config for Redis connection: {config}")

    return config


def create_cache_regions(app):
    """Initialise all cache regions with app config"""
    redis_config = get_redis_config(app)
    redis_pool = redis.ConnectionPool(**redis_config)
    regions = {}
    startup_time = int(time.time())

    # Region for model-related caching (ModelVariables etc)
    model_region = make_region(name='eveai_model').configure(
        'dogpile.cache.redis',
        arguments={'connection_pool': redis_pool},
        replace_existing_backend=True
    )
    regions['eveai_model'] = model_region
@@ -50,7 +66,7 @@ def create_cache_regions(app):
    # Region for eveai_chat_workers components (Specialists, Retrievers, ...)
    eveai_chat_workers_region = make_region(name='eveai_chat_workers').configure(
        'dogpile.cache.redis',
        arguments={'connection_pool': redis_pool},
        replace_existing_backend=True
    )
    regions['eveai_chat_workers'] = eveai_chat_workers_region
@@ -58,14 +74,14 @@ def create_cache_regions(app):
    # Region for eveai_workers components (Processors, ...)
    eveai_workers_region = make_region(name='eveai_workers').configure(
        'dogpile.cache.redis',
        arguments={'connection_pool': redis_pool},  # Same config for now
        replace_existing_backend=True
    )
    regions['eveai_workers'] = eveai_workers_region

    eveai_config_region = make_region(name='eveai_config').configure(
        'dogpile.cache.redis',
        arguments={'connection_pool': redis_pool},
        replace_existing_backend=True
    )
    regions['eveai_config'] = eveai_config_region
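All regions now share a single redis.ConnectionPool instead of each dogpile backend opening its own connections. A quick sanity sketch of that pattern (host and port are assumptions):

# Illustrative sketch, not part of the diff.
import redis

pool = redis.ConnectionPool(host='localhost', port=6379, db=4, max_connections=20)
client = redis.Redis(connection_pool=pool)
client.ping()  # both this client and the dogpile backends draw from the same pool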
223
common/utils/cache/translation_cache.py
vendored
Normal file
@@ -0,0 +1,223 @@
import json
import re
from typing import Dict, Any, Optional
from datetime import datetime as dt, timezone as tz

import xxhash
from flask import current_app
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from sqlalchemy.inspection import inspect

from common.langchain.persistent_llm_metrics_handler import PersistentLLMMetricsHandler
from common.utils.business_event_context import current_event
from common.utils.cache.base import CacheHandler, T
from common.extensions import db

from common.models.user import TranslationCache
from flask_security import current_user

from common.utils.model_utils import get_template


class TranslationCacheHandler(CacheHandler[TranslationCache]):
    """Handles caching of translations with fallback to database and external translation service"""
    handler_name = 'translation_cache'

    def __init__(self, region):
        super().__init__(region, 'translation')
        self.configure_keys('hash_key')

    def _to_cache_data(self, instance: TranslationCache) -> Dict[str, Any]:
        """Convert TranslationCache instance to cache data using SQLAlchemy inspection"""
        if not instance:
            return {}

        mapper = inspect(TranslationCache)
        data = {}

        for column in mapper.columns:
            value = getattr(instance, column.name)

            # Handle date serialization
            if isinstance(value, dt):
                data[column.name] = value.isoformat()
            else:
                data[column.name] = value

        return data

    def _from_cache_data(self, data: Dict[str, Any], **kwargs) -> TranslationCache:
        if not data:
            return None

        # Create a new TranslationCache instance
        translation = TranslationCache()
        mapper = inspect(TranslationCache)

        # Set all attributes dynamically
        for column in mapper.columns:
            if column.name in data:
                value = data[column.name]

                # Handle date deserialization
                if column.name.endswith('_date') and value:
                    if isinstance(value, str):
                        value = dt.fromisoformat(value).date()

                setattr(translation, column.name, value)

        metrics = {
            'total_tokens': translation.prompt_tokens + translation.completion_tokens,
            'prompt_tokens': translation.prompt_tokens,
            'completion_tokens': translation.completion_tokens,
            'time_elapsed': 0,
            'interaction_type': 'TRANSLATION-CACHE'
        }
        current_event.log_llm_metrics(metrics)

        return translation

    def _should_cache(self, value) -> bool:
        """Validate if the translation should be cached"""
        if value is None:
            return False

        # Handle both TranslationCache objects and serialized data (dict)
        if isinstance(value, TranslationCache):
            return value.cache_key is not None
        elif isinstance(value, dict):
            return value.get('cache_key') is not None

        return False

    def get_translation(self, text: str, target_lang: str, source_lang: str = None,
                        context: str = None) -> Optional[TranslationCache]:
        """
        Get the translation for a text in a specific language

        Args:
            text: The text to be translated
            target_lang: The target language for the translation
            source_lang: The source language of the text to be translated
            context: Optional context for the translation

        Returns:
            TranslationCache instance if found, None otherwise
        """
        if not context:
            context = 'No context provided.'

        def creator_func(hash_key: str) -> Optional[TranslationCache]:
            # Check if translation already exists in database
            existing_translation = db.session.query(TranslationCache).filter_by(cache_key=hash_key).first()

            if existing_translation:
                # Update last used timestamp
                existing_translation.last_used_at = dt.now(tz=tz.utc)
                metrics = {
                    'total_tokens': existing_translation.prompt_tokens + existing_translation.completion_tokens,
                    'prompt_tokens': existing_translation.prompt_tokens,
                    'completion_tokens': existing_translation.completion_tokens,
                    'time_elapsed': 0,
                    'interaction_type': 'TRANSLATION-DB'
                }
                current_event.log_llm_metrics(metrics)
                db.session.commit()
                return existing_translation

            # Translation not found in DB, need to create it
            # Get the translation and metrics
            translated_text, metrics = self.translate_text(
                text_to_translate=text,
                target_lang=target_lang,
                source_lang=source_lang,
                context=context
            )

            # Create new translation cache record
            new_translation = TranslationCache(
                cache_key=hash_key,
                source_text=text,
                translated_text=translated_text,
                source_language=source_lang,
                target_language=target_lang,
                context=context,
                prompt_tokens=metrics.get('prompt_tokens', 0),
                completion_tokens=metrics.get('completion_tokens', 0),
                created_at=dt.now(tz=tz.utc),
                created_by=getattr(current_user, 'id', None) if 'current_user' in globals() else None,
                updated_at=dt.now(tz=tz.utc),
                updated_by=getattr(current_user, 'id', None) if 'current_user' in globals() else None,
                last_used_at=dt.now(tz=tz.utc)
            )

            # Save to database
            db.session.add(new_translation)
            db.session.commit()

            return new_translation

        # Generate the hash key using your existing method
        hash_key = self._generate_cache_key(text, target_lang, source_lang, context)

        # Pass the hash_key to the get method
        return self.get(creator_func, hash_key=hash_key)

    def invalidate_tenant_translations(self, tenant_id: int):
        """Invalidate cached translations for specific tenant"""
        self.invalidate(tenant_id=tenant_id)

    def _generate_cache_key(self, text: str, target_lang: str, source_lang: str = None, context: str = None) -> str:
        """Generate cache key for a translation"""
        cache_data = {
            "text": text.strip(),
            "target_lang": target_lang.lower(),
            "source_lang": source_lang.lower() if source_lang else None,
            "context": context.strip() if context else None,
        }

        cache_string = json.dumps(cache_data, sort_keys=True, ensure_ascii=False)
        return xxhash.xxh64(cache_string.encode('utf-8')).hexdigest()

    def translate_text(self, text_to_translate: str, target_lang: str, source_lang: str = None,
                       context: str = None) -> tuple[str, dict[str, int | float]]:
        target_language = current_app.config['SUPPORTED_LANGUAGE_ISO639_1_LOOKUP'][target_lang]
        prompt_params = {
            "text_to_translate": text_to_translate,
            "target_language": target_language,
        }
        if context:
            template, llm = get_template("translation_with_context")
            prompt_params["context"] = context
        else:
            template, llm = get_template("translation_without_context")

        # Add a metrics handler to capture usage
        metrics_handler = PersistentLLMMetricsHandler()
        existing_callbacks = llm.callbacks
        llm.callbacks = existing_callbacks + [metrics_handler]

        translation_prompt = ChatPromptTemplate.from_template(template)

        setup = RunnablePassthrough()

        chain = (setup | translation_prompt | llm | StrOutputParser())

        translation = chain.invoke(prompt_params)

        # Remove double square brackets from translation
        translation = re.sub(r'\[\[(.*?)\]\]', r'\1', translation)

        metrics = metrics_handler.get_metrics()

        return translation, metrics


def register_translation_cache_handlers(cache_manager) -> None:
    """Register translation cache handlers with cache manager"""
    cache_manager.register_handler(
        TranslationCacheHandler,
        'eveai_model'  # Use existing eveai_model region
    )
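The cache key above is deterministic and order-independent because the payload is JSON-dumped with sort_keys=True before hashing. A self-contained sketch mirroring _generate_cache_key:

# Illustrative sketch, not part of the diff.
import json
import xxhash

def make_key(text, target_lang, source_lang=None, context=None):
    data = {
        "text": text.strip(),
        "target_lang": target_lang.lower(),
        "source_lang": source_lang.lower() if source_lang else None,
        "context": context.strip() if context else None,
    }
    return xxhash.xxh64(json.dumps(data, sort_keys=True, ensure_ascii=False).encode('utf-8')).hexdigest()

# Whitespace and casing are normalised away, so these hit the same cache entry:
assert make_key("Hello", "NL") == make_key("Hello ", "nl")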
@@ -1,3 +1,5 @@
import ssl

from celery import Celery
from kombu import Queue
from werkzeug.local import LocalProxy
@@ -10,24 +12,63 @@ def init_celery(celery, app, is_beat=False):
    celery_app.main = app.name

    celery_config = {
        'broker_url': app.config.get('CELERY_BROKER_URL', 'redis://localhost:6379/0'),
        'result_backend': app.config.get('CELERY_RESULT_BACKEND', 'redis://localhost:6379/0'),
        'task_serializer': app.config.get('CELERY_TASK_SERIALIZER', 'json'),
        'result_serializer': app.config.get('CELERY_RESULT_SERIALIZER', 'json'),
        'accept_content': app.config.get('CELERY_ACCEPT_CONTENT', ['json']),
        'timezone': app.config.get('CELERY_TIMEZONE', 'UTC'),
        'enable_utc': app.config.get('CELERY_ENABLE_UTC', True),
        # connection pools
        # 'broker_pool_limit': app.config.get('CELERY_BROKER_POOL_LIMIT', 10),
    }

    # Transport options (timeouts, max_connections for Redis transport)
    # broker_transport_options = {
    #     'master_name': None,  # only relevant for Sentinel; otherwise harmless
    #     'max_connections': 20,
    #     'retry_on_timeout': True,
    #     'socket_connect_timeout': 5,
    #     'socket_timeout': 5,
    # }
    # celery_config['broker_transport_options'] = broker_transport_options
    #
    # # Backend transport options (Redis backend accepts similar timeouts)
    # result_backend_transport_options = {
    #     'retry_on_timeout': True,
    #     'socket_connect_timeout': 5,
    #     'socket_timeout': 5,
    #     # max_connections may be supported on newer Celery/redis backends; harmless if ignored
    #     'max_connections': 20,
    # }
    # celery_config['result_backend_transport_options'] = result_backend_transport_options

    # TLS (only when a cert is provided or your URLs are rediss://)
    ssl_opts = None
    cert_path = app.config.get('REDIS_CA_CERT_PATH')
    if cert_path:
        ssl_opts = {
            'ssl_cert_reqs': ssl.CERT_REQUIRED,
            'ssl_ca_certs': cert_path,
            'ssl_check_hostname': app.config.get('REDIS_SSL_CHECK_HOSTNAME', True),
        }
        app.logger.info(
            "SSL configured for Celery Redis connection (CA: %s, hostname-check: %s)",
            cert_path,
            'enabled' if app.config.get('REDIS_SSL_CHECK_HOSTNAME', True) else 'disabled (IP)'
        )
        celery_config['broker_use_ssl'] = ssl_opts
        celery_config['redis_backend_use_ssl'] = ssl_opts

    # Beat/RedBeat
    if is_beat:
        # Add configurations specific to the Beat scheduler
        celery_config['beat_scheduler'] = 'redbeat.RedBeatScheduler'
        celery_config['redbeat_lock_key'] = 'redbeat::lock'
        celery_config['beat_max_loop_interval'] = 10

    celery_app.conf.update(**celery_config)

    # Queues for workers (note: Redis ignores routing_key and priority features like RabbitMQ)
    if not is_beat:
        celery_app.conf.task_queues = (
            Queue('default', routing_key='task.#'),
@@ -60,6 +101,7 @@ def init_celery(celery, app, is_beat=False):


def make_celery(app_name, config):
    # keep API but return the single instance
    return celery_app
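The same ssl options dict feeds both broker_use_ssl and redis_backend_use_ssl, which is how Celery expects TLS to be configured for rediss:// URLs. A minimal standalone sketch (URLs and the CA path are assumptions):

# Illustrative sketch, not part of the diff.
import ssl
from celery import Celery

app = Celery('worker', broker='rediss://redis:6380/0', backend='rediss://redis:6380/0')
ssl_opts = {
    'ssl_cert_reqs': ssl.CERT_REQUIRED,
    'ssl_ca_certs': '/etc/ssl/certs/redis-ca.pem',
}
app.conf.broker_use_ssl = ssl_opts         # TLS for the broker connection
app.conf.redis_backend_use_ssl = ssl_opts  # TLS for the result backend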
172
common/utils/chat_utils.py
Normal file
@@ -0,0 +1,172 @@
"""
Utility functions for chat customization.
"""
import json
import re

from flask import current_app


def get_default_chat_customisation(tenant_customisation=None):
    """
    Get chat customization options with default values for missing options.

    Args:
        tenant_customisation (dict or str, optional): The tenant's customization options.
            Defaults to None. Can be a dict or a JSON string.

    Returns:
        dict: A dictionary containing all customization options with default values
        for any missing options.
    """
    # Default customization options
    default_customisation = {
        'sidebar_markdown': '',
        'sidebar_color': '#f8f9fa',
        'sidebar_background': '#2c3e50',
        'markdown_background_color': 'transparent',
        'markdown_text_color': '#ffffff',
        'gradient_start_color': '#f5f7fa',
        'gradient_end_color': '#c3cfe2',
        'progress_tracker_insights': 'No Information',
        'form_title_display': 'Full Title',
        'active_background_color': '#ffffff',
        'history_background': 10,
        'ai_message_background': '#ffffff',
        'ai_message_text_color': '#212529',
        'human_message_background': '#212529',
        'human_message_text_color': '#ffffff',
        'human_message_inactive_text_color': '#808080'
    }

    # If no tenant customization is provided, return the defaults
    if tenant_customisation is None:
        return default_customisation

    # Start with the default customization
    customisation = default_customisation.copy()

    # Convert JSON string to dict if needed
    if isinstance(tenant_customisation, str):
        try:
            tenant_customisation = json.loads(tenant_customisation)
        except json.JSONDecodeError as e:
            current_app.logger.error(f"Error parsing JSON customisation: {e}")
            return default_customisation

    # Update with tenant customization
    if tenant_customisation:
        for key, value in tenant_customisation.items():
            if key in customisation:
                customisation[key] = value

    return customisation


def hex_to_rgb(hex_color):
    """
    Convert hex color to RGB tuple.

    Args:
        hex_color (str): Hex color string (e.g., '#ffffff' or 'ffffff')

    Returns:
        tuple: RGB values as (r, g, b)
    """
    # Remove # if present
    hex_color = hex_color.lstrip('#')

    # Handle 3-character hex codes
    if len(hex_color) == 3:
        hex_color = ''.join([c * 2 for c in hex_color])

    # Convert to RGB
    try:
        return tuple(int(hex_color[i:i + 2], 16) for i in (0, 2, 4))
    except ValueError:
        # Return white as fallback
        return (255, 255, 255)


def adjust_color_alpha(percentage):
    """
    Convert percentage to RGBA color with appropriate base color and alpha.

    Args:
        percentage (int): Percentage (-50 to 50)
            Positive = white base (lighten)
            Negative = black base (darken)
            Zero = transparent

    Returns:
        str: RGBA color string for CSS
    """
    if percentage == 0:
        return 'rgba(255, 255, 255, 0)'  # Fully transparent

    # Determine the base color
    if percentage > 0:
        # Positive = white, for lightening
        base_color = (255, 255, 255)
    else:
        # Negative = black, for darkening
        base_color = (0, 0, 0)

    # Compute alpha from the percentage (max 50 = alpha 1.0)
    alpha = abs(percentage) / 50.0
    alpha = max(0.0, min(1.0, alpha))  # Clamp to the 0.0-1.0 range

    return f'rgba({base_color[0]}, {base_color[1]}, {base_color[2]}, {alpha})'


def adjust_color_brightness(hex_color, percentage):
    """
    Adjust the brightness of a hex color by a percentage.

    Args:
        hex_color (str): Hex color string (e.g., '#ffffff')
        percentage (int): Percentage to adjust (-100 to 100)
            Positive = lighter, Negative = darker

    Returns:
        str: RGBA color string for CSS (e.g., 'rgba(255, 255, 255, 0.9)')
    """
    if not hex_color or not isinstance(hex_color, str):
        return 'rgba(255, 255, 255, 0.1)'

    # Get RGB values
    r, g, b = hex_to_rgb(hex_color)

    # Calculate adjustment factor
    if percentage > 0:
        # Lighten: move towards white
        factor = percentage / 100.0
        r = int(r + (255 - r) * factor)
        g = int(g + (255 - g) * factor)
        b = int(b + (255 - b) * factor)
    else:
        # Darken: move towards black
        factor = abs(percentage) / 100.0
        r = int(r * (1 - factor))
        g = int(g * (1 - factor))
        b = int(b * (1 - factor))

    # Ensure values are within 0-255 range
    r = max(0, min(255, r))
    g = max(0, min(255, g))
    b = max(0, min(255, b))

    # Return as rgba with slight transparency for better blending
    return f'rgba({r}, {g}, {b}, 0.9)'


def get_base_background_color():
    """
    Get the base background color for history adjustments.
    This should be the main chat background color.

    Returns:
        str: Hex color string
    """
    # Use a neutral base color that works well with adjustments
    return '#f8f9fa'
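How the color helpers compose, as a quick sketch:

# Illustrative sketch, not part of the diff.
assert hex_to_rgb('#fff') == (255, 255, 255)                              # 3-char hex expands
assert adjust_color_alpha(0) == 'rgba(255, 255, 255, 0)'                  # zero = transparent
assert adjust_color_alpha(-25) == 'rgba(0, 0, 0, 0.5)'                    # darken at half strength
assert adjust_color_brightness('#808080', 50) == 'rgba(191, 191, 191, 0.9)'  # halfway to white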
@@ -21,7 +21,7 @@ class TaggingField(BaseModel):
    @field_validator('type', mode='before')
    @classmethod
    def validate_type(cls, v: str) -> str:
        valid_types = ['string', 'integer', 'float', 'date', 'enum', 'color']
        if v not in valid_types:
            raise ValueError(f'type must be one of {valid_types}')
        return v
@@ -243,7 +243,7 @@ class ArgumentDefinition(BaseModel):
    @field_validator('type')
    @classmethod
    def validate_type(cls, v: str) -> str:
        valid_types = ['string', 'integer', 'float', 'date', 'enum', 'color']
        if v not in valid_types:
            raise ValueError(f'type must be one of {valid_types}')
        return v
@@ -256,7 +256,8 @@ class ArgumentDefinition(BaseModel):
        'integer': NumericConstraint,
        'float': NumericConstraint,
        'date': DateConstraint,
        'enum': EnumConstraint,
        'color': StringConstraint
    }

    expected_type = expected_constraint_types.get(self.type)
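The new 'color' type is validated like the other type ids and reuses StringConstraint for its constraints. A simplified, self-contained model showing the same validator pattern (the class here is hypothetical, not the project's own):

# Illustrative sketch, not part of the diff (simplified stand-in model).
from pydantic import BaseModel, field_validator

class DemoField(BaseModel):
    type: str

    @field_validator('type', mode='before')
    @classmethod
    def validate_type(cls, v: str) -> str:
        valid_types = ['string', 'integer', 'float', 'date', 'enum', 'color']
        if v not in valid_types:
            raise ValueError(f'type must be one of {valid_types}')
        return v

DemoField(type='color')  # accepted after this change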
222
common/utils/content_utils.py
Normal file
222
common/utils/content_utils.py
Normal file
@@ -0,0 +1,222 @@
import os
import re
import logging
from packaging import version
from flask import current_app


class ContentManager:
    def __init__(self, app=None):
        self.app = app
        if app:
            self.init_app(app)

    def init_app(self, app):
        self.app = app

        # Check whether the path exists
        # if not os.path.exists(app.config['CONTENT_DIR']):
        #     logger.warning(f"Content directory not found at: {app.config['CONTENT_DIR']}")
        # else:
        #     logger.info(f"Content directory configured at: {app.config['CONTENT_DIR']}")

    def get_content_path(self, content_type, major_minor=None, patch=None):
        """
        Return the full path to a content file

        Args:
            content_type (str): Content type (e.g. 'changelog', 'terms')
            major_minor (str, optional): Major.minor version (e.g. '1.0')
            patch (str, optional): Patch number (e.g. '5')

        Returns:
            str: Full path to the content directory or file
        """
        content_path = os.path.join(self.app.config['CONTENT_DIR'], content_type)

        if major_minor:
            content_path = os.path.join(content_path, major_minor)

        if patch:
            content_path = os.path.join(content_path, f"{major_minor}.{patch}.md")

        return content_path

    def _parse_version(self, filename):
        """Parse a version number from a filename"""
        match = re.match(r'(\d+\.\d+)\.(\d+)\.md', filename)
        if match:
            return match.group(1), match.group(2)
        return None, None

    def get_latest_version(self, content_type, major_minor=None):
        """
        Get the latest version of a given content type

        Args:
            content_type (str): Content type (e.g. 'changelog', 'terms')
            major_minor (str, optional): Specific major.minor version, otherwise the highest

        Returns:
            tuple: (major_minor, patch, full_version), or None if not found
        """
        try:
            # Base path for this content type
            content_path = os.path.join(self.app.config['CONTENT_DIR'], content_type)

            if not os.path.exists(content_path):
                current_app.logger.error(f"Content path does not exist: {content_path}")
                return None

            # If no major_minor is given, find the highest one
            if not major_minor:
                available_versions = [f for f in os.listdir(content_path) if not f.startswith('.')]
                if not available_versions:
                    return None

                # Sort by version number (major.minor)
                available_versions.sort(key=lambda v: version.parse(v))
                major_minor = available_versions[-1]

            # Now that we have major_minor, find the highest patch
            major_minor_path = os.path.join(content_path, major_minor)
            current_app.logger.debug(f"Major/Minor path: {major_minor_path}")

            if not os.path.exists(major_minor_path):
                current_app.logger.error(f"Version path does not exist: {major_minor_path}")
                return None

            files = [f for f in os.listdir(major_minor_path) if not f.startswith('.')]
            current_app.logger.debug(f"Files in version path: {files}")
            version_files = []

            for file in files:
                mm, p = self._parse_version(file)
                current_app.logger.debug(f"File: {file}, mm: {mm}, p: {p}")
                if mm == major_minor and p:
                    version_files.append((mm, p, f"{mm}.{p}"))

            if not version_files:
                return None

            # Sort by patch number
            version_files.sort(key=lambda v: int(v[1]))

            current_app.logger.debug(f"Latest version: {version_files[-1]}")
            return version_files[-1]

        except Exception as e:
            current_app.logger.error(f"Error finding latest version for {content_type}: {str(e)}")
            return None

    def read_content(self, content_type, major_minor=None, patch=None):
        """
        Read content with version support

        If major_minor and patch are not given, the latest version is used.
        If only major_minor is given, the latest patch of that version is used.

        Args:
            content_type (str): Content type (e.g. 'changelog', 'terms')
            major_minor (str, optional): Major.minor version (e.g. '1.0')
            patch (str, optional): Patch number (e.g. '5')

        Returns:
            dict: {
                'content': str,
                'version': str,
                'content_type': str
            }, or None on error
        """
        try:
            current_app.logger.debug(f"Reading content {content_type}")
            # If no version is given, find the latest
            if not major_minor:
                version_info = self.get_latest_version(content_type)
                if not version_info:
                    current_app.logger.error(f"No versions found for {content_type}")
                    return None

                major_minor, patch, full_version = version_info

            # If no patch is given, find the latest patch for this major_minor
            elif not patch:
                version_info = self.get_latest_version(content_type, major_minor)
                if not version_info:
                    current_app.logger.error(f"No versions found for {content_type} {major_minor}")
                    return None

                major_minor, patch, full_version = version_info
            else:
                full_version = f"{major_minor}.{patch}"

            # Now that we have major_minor and patch, read the file
            file_path = self.get_content_path(content_type, major_minor, patch)
            current_app.logger.debug(f"Content File path: {file_path}")

            if not os.path.exists(file_path):
                current_app.logger.error(f"Content file does not exist: {file_path}")
                return None

            with open(file_path, 'r', encoding='utf-8') as file:
                content = file.read()

            current_app.logger.debug(f"Content read: {content}")

            return {
                'content': content,
                'version': full_version,
                'content_type': content_type
            }

        except Exception as e:
            current_app.logger.error(f"Error reading content {content_type} {major_minor}.{patch}: {str(e)}")
            return None

    def list_content_types(self):
        """List all available content types"""
        try:
            return [d for d in os.listdir(self.app.config['CONTENT_DIR'])
                    if os.path.isdir(os.path.join(self.app.config['CONTENT_DIR'], d))]
        except Exception as e:
            current_app.logger.error(f"Error listing content types: {str(e)}")
            return []

    def list_versions(self, content_type):
        """
        List all available versions for a content type

        Returns:
            list: List of dicts with version information
                  [{'version': '1.0.0', 'path': '/path/to/file', 'date_modified': datetime}]
        """
        versions = []
        try:
            content_path = os.path.join(self.app.config['CONTENT_DIR'], content_type)

            if not os.path.exists(content_path):
                return []

            for major_minor in os.listdir(content_path):
                major_minor_path = os.path.join(content_path, major_minor)

                if not os.path.isdir(major_minor_path):
                    continue

                for file in os.listdir(major_minor_path):
                    mm, p = self._parse_version(file)
                    if mm and p:
                        file_path = os.path.join(major_minor_path, file)
                        mod_time = os.path.getmtime(file_path)
                        versions.append({
                            'version': f"{mm}.{p}",
                            'path': file_path,
                            'date_modified': mod_time
                        })

            # Sort by version number
            versions.sort(key=lambda v: version.parse(v['version']))
            return versions

        except Exception as e:
            current_app.logger.error(f"Error listing versions for {content_type}: {str(e)}")
            return []
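A hedged usage sketch of the new `ContentManager`; the on-disk layout (`<CONTENT_DIR>/<type>/<major.minor>/<major.minor.patch>.md`) is implied by `get_content_path` above, and the path below is a placeholder:

```python
from flask import Flask
from common.utils.content_utils import ContentManager

app = Flask(__name__)
app.config['CONTENT_DIR'] = '/srv/content'  # placeholder path

cm = ContentManager(app)
with app.app_context():
    latest = cm.read_content('changelog')         # latest version overall
    pinned = cm.read_content('changelog', '1.0')  # latest patch of the 1.0 line
    if latest:
        print(latest['version'], latest['content_type'])
```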
@@ -1,9 +1,9 @@
"""Database related functions"""
from os import popen
-from sqlalchemy import text
+from sqlalchemy import text, event
from sqlalchemy.schema import CreateSchema
from sqlalchemy.exc import InternalError
-from sqlalchemy.orm import sessionmaker, scoped_session
+from sqlalchemy.orm import sessionmaker, scoped_session, Session as SASession
from sqlalchemy.exc import SQLAlchemyError
from flask import current_app

@@ -16,6 +16,67 @@ class Database:
    def __init__(self, tenant: str) -> None:
        self.schema = str(tenant)

    # --- Session / Transaction events to ensure correct search_path per transaction ---
    @event.listens_for(SASession, "after_begin")
    def _set_search_path_per_tx(session, transaction, connection):
        """Ensure each transaction sees the right tenant schema, regardless of
        which pooled connection is used. Uses SET LOCAL so it is scoped to the tx.
        """
        schema = session.info.get("tenant_schema")
        if schema:
            current_app.logger.debug(f"DBCTX tx_begin schema={schema}")
            try:
                connection.exec_driver_sql(f'SET LOCAL search_path TO "{schema}", public')
                # Optional visibility/logging for debugging
                sp = connection.exec_driver_sql("SHOW search_path").scalar()
                try:
                    current_app.logger.info(f"DBCTX tx_begin conn_id={id(connection.connection)} search_path={sp}")
                except Exception:
                    pass
            except Exception as e:
                try:
                    current_app.logger.error(f"Failed to SET LOCAL search_path for schema {schema}: {e!r}")
                except Exception:
                    pass

    def _log_db_context(self, origin: str = "") -> None:
        """Log key DB context info to diagnose schema/search_path issues.

        Collects and logs in a single structured line:
        - current_database()
        - inet_server_addr(), inet_server_port()
        - SHOW search_path
        - current_schema()
        - to_regclass('interaction')
        - to_regclass('<tenant>.interaction')
        """
        try:
            db_name = db.session.execute(text("SELECT current_database()")).scalar()
            host = db.session.execute(text("SELECT inet_server_addr()")).scalar()
            port = db.session.execute(text("SELECT inet_server_port()")).scalar()
            search_path = db.session.execute(text("SHOW search_path")).scalar()
            current_schema = db.session.execute(text("SELECT current_schema()")).scalar()
            reg_unqualified = db.session.execute(text("SELECT to_regclass('interaction')")).scalar()
            qualified = f"{self.schema}.interaction"
            reg_qualified = db.session.execute(
                text("SELECT to_regclass(:qn)"),
                {"qn": qualified}
            ).scalar()
            current_app.logger.info(
                "DBCTX origin=%s db=%s host=%s port=%s search_path=%s current_schema=%s to_regclass(interaction)=%s to_regclass(%s)=%s",
                origin, db_name, host, port, search_path, current_schema, reg_unqualified, qualified, reg_qualified
            )
        except SQLAlchemyError as e:
            current_app.logger.error(
                f"DBCTX logging failed at {origin} for schema {self.schema}: {e!r}"
            )

    def get_engine(self):
        """create new schema engine"""
        return db.engine.execution_options(
@@ -46,12 +107,38 @@ class Database:

    def create_tables(self):
        """create tables for schema"""
-        db.metadata.create_all(self.get_engine())
+        try:
+            db.metadata.create_all(self.get_engine())
+        except SQLAlchemyError as e:
+            current_app.logger.error(f"💔 Error creating tables for schema {self.schema}: {e.args}")

    def switch_schema(self):
-        """switch between tenant/public database schema"""
-        db.session.execute(text(f'set search_path to "{self.schema}", public'))
-        db.session.commit()
+        """switch between tenant/public database schema with diagnostics logging"""
+        # Record the desired tenant schema on the active Session so events can use it
+        try:
+            db.session.info["tenant_schema"] = self.schema
+        except Exception:
+            pass
+        # Log the context before switching
+        self._log_db_context("before_switch")
+        try:
+            db.session.execute(text(f'set search_path to "{self.schema}", public'))
+            db.session.commit()
+        except SQLAlchemyError as e:
+            # Roll back on error to avoid InFailedSqlTransaction and log details
+            try:
+                db.session.rollback()
+            except Exception:
+                pass
+            current_app.logger.error(
+                f"Error switching search_path to {self.schema}: {e!r}"
+            )
+            # Also log context after failure
+            self._log_db_context("after_switch_failed")
+            # Re-raise to let caller decide handling if needed
+            raise
+        # Log the context after successful switch
+        self._log_db_context("after_switch")

    def migrate_tenant_schema(self):
        """migrate tenant database schema for new tenant"""

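The key property exploited above: `SET LOCAL` only lasts until the transaction ends, so the `after_begin` hook re-applies it for every new transaction regardless of which pooled connection is handed out. A self-contained sketch (DSN and schema name are placeholders):

```python
from sqlalchemy import create_engine, event, text
from sqlalchemy.orm import Session

engine = create_engine("postgresql+psycopg2://user:pass@localhost/appdb")  # placeholder DSN

@event.listens_for(Session, "after_begin")
def _set_search_path(session, transaction, connection):
    schema = session.info.get("tenant_schema")
    if schema:
        # Scoped to this transaction only; reverts on COMMIT/ROLLBACK.
        connection.exec_driver_sql(f'SET LOCAL search_path TO "{schema}", public')

with Session(engine) as session:
    session.info["tenant_schema"] = "tenant_42"  # hypothetical schema name
    session.execute(text("SELECT 1"))            # hook fires before this statement
```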
@@ -3,7 +3,7 @@ from datetime import datetime as dt, timezone as tz
from sqlalchemy import desc
from sqlalchemy.exc import SQLAlchemyError
from werkzeug.utils import secure_filename
-from common.models.document import Document, DocumentVersion, Catalog
+from common.models.document import Document, DocumentVersion, Catalog, Processor
from common.extensions import db, minio_client
from common.utils.celery_utils import current_celery
from flask import current_app
@@ -11,15 +11,15 @@ import requests
from urllib.parse import urlparse, unquote, urlunparse, parse_qs
import os

from config.type_defs.processor_types import PROCESSOR_TYPES
from .config_field_types import normalize_json_field
from .eveai_exceptions import (EveAIInvalidLanguageException, EveAIDoubleURLException, EveAIUnsupportedFileType,
                               EveAIInvalidCatalog, EveAIInvalidDocument, EveAIInvalidDocumentVersion, EveAIException)
from .minio_utils import MIB_CONVERTOR
from ..models.user import Tenant
from common.utils.model_logging_utils import set_logging_information, update_logging_information
from common.services.entitlements import LicenseUsageServices

MB_CONVERTOR = 1_048_576


def get_file_size(file):
    try:
@@ -38,7 +38,7 @@ def get_file_size(file):
def create_document_stack(api_input, file, filename, extension, tenant_id):
    # Precheck if we can add a document to the stack

-    LicenseUsageServices.check_storage_and_embedding_quota(tenant_id, get_file_size(file)/MB_CONVERTOR)
+    LicenseUsageServices.check_storage_and_embedding_quota(tenant_id, get_file_size(file) / MIB_CONVERTOR)

    # Create the Document
    catalog_id = int(api_input.get('catalog_id'))
@@ -143,7 +143,7 @@ def upload_file_for_version(doc_vers, file, extension, tenant_id):
    )
    doc_vers.bucket_name = bn
    doc_vers.object_name = on
-    doc_vers.file_size = size / MB_CONVERTOR  # Convert bytes to MB
+    doc_vers.file_size = size / MIB_CONVERTOR  # Convert bytes to MiB

    db.session.commit()
    current_app.logger.info(f'Successfully saved document to MinIO for tenant {tenant_id} for '
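The MB→MIB rename is not cosmetic: `MIB_CONVERTOR` is 2**20, not 10**6, so quota figures shift by roughly 4.9%. A quick check:

```python
MIB_CONVERTOR = 1_048_576          # 2**20 bytes per MiB
size_bytes = 10_000_000
print(size_bytes / MIB_CONVERTOR)  # ~9.537 MiB
print(size_bytes / 1_000_000)      # 10.0 MB (decimal megabytes)
```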
@@ -192,9 +192,32 @@ def process_url(url, tenant_id):
    existing_doc = DocumentVersion.query.filter_by(url=url).first()
    if existing_doc:
        raise EveAIDoubleURLException
    # Prepare the headers for the best chance of downloading the URL
    referer = get_referer_from_url(url)
    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/115.0.0.0 Safari/537.36"
        ),
        "Accept": (
            "text/html,application/xhtml+xml,application/xml;"
            "q=0.9,image/avif,image/webp,image/apng,*/*;"
            "q=0.8,application/signed-exchange;v=b3;q=0.7"
        ),
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "nl-BE,nl;q=0.9,en-US;q=0.8,en;q=0.7",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
        "Referer": referer,
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-User": "?1",
    }

    # Download the content
-    response = requests.get(url)
+    response = requests.get(url, headers=headers)
    response.raise_for_status()
    file_content = response.content

@@ -353,7 +376,7 @@ def refresh_document_with_content(doc_id: int, tenant_id: int, file_content: byt
    old_doc_vers = DocumentVersion.query.filter_by(doc_id=doc_id).order_by(desc(DocumentVersion.id)).first()

    # Precheck if we have enough quota for the new version
-    LicenseUsageServices.check_storage_and_embedding_quota(tenant_id, get_file_size(file_content) / MB_CONVERTOR)
+    LicenseUsageServices.check_storage_and_embedding_quota(tenant_id, get_file_size(file_content) / MIB_CONVERTOR)

    # Create new version with same file type as original
    extension = old_doc_vers.file_type
@@ -469,3 +492,19 @@ def lookup_document(tenant_id: int, lookup_criteria: dict, metadata_type: str) -
            "Error during document lookup",
            status_code=500
        )


def is_file_type_supported_by_catalog(catalog_id, file_type):
    processors = Processor.query.filter_by(catalog_id=catalog_id).filter_by(active=True).all()

    supported_file_types = []
    for processor in processors:
        processor_file_types = PROCESSOR_TYPES[processor.type]['file_types']
        file_types = [f.strip() for f in processor_file_types.split(",")]
        supported_file_types.extend(file_types)

    if file_type not in supported_file_types:
        raise EveAIUnsupportedFileType()


def get_referer_from_url(url):
    parsed = urlparse(url)
    return f"{parsed.scheme}://{parsed.netloc}/"
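The lookup in `is_file_type_supported_by_catalog` implies that each `PROCESSOR_TYPES` entry carries a comma-separated `file_types` string; the concrete entries below are invented for illustration (the real table lives in `config.type_defs.processor_types`):

```python
PROCESSOR_TYPES = {  # hypothetical entries, for illustration only
    'markdown': {'file_types': 'md, markdown'},
    'pdf': {'file_types': 'pdf'},
}

def supported_types(processor_type):
    return [f.strip() for f in PROCESSOR_TYPES[processor_type]['file_types'].split(',')]

print(supported_types('markdown'))  # ['md', 'markdown']
```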
@@ -38,6 +38,8 @@ def create_default_config_from_type_config(type_config):
            default_config[field_name] = 0
        elif field_type == "boolean":
            default_config[field_name] = False
+        elif field_type == "color":
+            default_config[field_name] = "#000000"
        else:
            default_config[field_name] = ""

156 common/utils/errors.py Normal file
@@ -0,0 +1,156 @@

import traceback

import jinja2
from flask import render_template, request, jsonify, redirect, current_app, flash
from flask_login import current_user

from common.utils.eveai_exceptions import EveAINoSessionTenant
from common.utils.nginx_utils import prefixed_url_for


def not_found_error(error):
    profile = current_app.config.get('ERRORS_PROFILE', 'web_app')
    if profile == 'web_app':
        if not current_user.is_authenticated:
            return redirect(prefixed_url_for('security.login', for_redirect=True))
    current_app.logger.error(f"Not Found Error: {error}")
    current_app.logger.error(traceback.format_exc())
    return render_template('error/404.html'), 404


def internal_server_error(error):
    profile = current_app.config.get('ERRORS_PROFILE', 'web_app')
    if profile == 'web_app':
        if not current_user.is_authenticated:
            return redirect(prefixed_url_for('security.login', for_redirect=True))
    current_app.logger.error(f"Internal Server Error: {error}")
    current_app.logger.error(traceback.format_exc())
    return render_template('error/500.html'), 500


def not_authorised_error(error):
    profile = current_app.config.get('ERRORS_PROFILE', 'web_app')
    if profile == 'web_app':
        if not current_user.is_authenticated:
            return redirect(prefixed_url_for('security.login', for_redirect=True))
    current_app.logger.error(f"Not Authorised Error: {error}")
    current_app.logger.error(traceback.format_exc())
    return render_template('error/401.html'), 401


def access_forbidden(error):
    profile = current_app.config.get('ERRORS_PROFILE', 'web_app')
    if profile == 'web_app':
        if not current_user.is_authenticated:
            return redirect(prefixed_url_for('security.login', for_redirect=True))
    current_app.logger.error(f"Access Forbidden: {error}")
    current_app.logger.error(traceback.format_exc())
    return render_template('error/403.html'), 403


def key_error_handler(error):
    profile = current_app.config.get('ERRORS_PROFILE', 'web_app')
    # Check if the KeyError is specifically for 'tenant'
    if str(error) == "'tenant'":
        if profile == 'web_app':
            return redirect(prefixed_url_for('security.login', for_redirect=True))
        else:
            current_app.logger.warning("Session tenant missing in chat_client context")
            return render_template('error/401.html'), 401
    # For other KeyErrors, you might want to log the error and return a generic error page
    current_app.logger.error(f"Key Error: {error}")
    current_app.logger.error(traceback.format_exc())
    return render_template('error/generic.html', error_message="An unexpected error occurred"), 500


def attribute_error_handler(error):
    """Handle AttributeError exceptions.

    Specifically catches SQLAlchemy relationship errors when string IDs
    are used instead of model instances.
    """
    error_msg = str(error)
    current_app.logger.error(f"AttributeError: {error_msg}")
    current_app.logger.error(traceback.format_exc())

    # Handle the SQLAlchemy relationship error specifically
    if "'str' object has no attribute '_sa_instance_state'" in error_msg:
        flash('Database relationship error. Please check your form inputs and try again.', 'error')
        return render_template('error/500.html',
                               error_type="Relationship Error",
                               error_details="A string value was provided where a database object was expected."), 500

    # Handle other AttributeErrors
    flash('An application error occurred. The technical team has been notified.', 'error')
    return render_template('error/500.html',
                           error_type="Attribute Error",
                           error_details=error_msg), 500


def no_tenant_selected_error(error):
    """Handle errors when no tenant is selected in the current session.

    This typically happens when a session expires or becomes invalid after
    a long period of inactivity. The user will be redirected to the login page (web_app)
    or shown an error page (chat_client).
    """
    profile = current_app.config.get('ERRORS_PROFILE', 'web_app')
    current_app.logger.error(f"No Session Tenant Error: {error}")
    current_app.logger.error(traceback.format_exc())
    flash('Your session expired. You will have to re-enter your credentials', 'warning')

    if profile == 'web_app':
        # Perform logout if user is authenticated
        if current_user.is_authenticated:
            from flask_security.utils import logout_user
            logout_user()
        # Redirect to login page
        return redirect(prefixed_url_for('security.login', for_redirect=True))
    else:
        # chat_client: render 401 page
        return render_template('error/401.html'), 401


def general_exception(e):
    current_app.logger.error(f"Unhandled Exception: {e}", exc_info=True)
    flash('An application error occurred. The technical team has been notified.', 'error')
    return render_template('error/500.html',
                           error_type=type(e).__name__,
                           error_details=str(e)), 500


def template_not_found_error(error):
    """Handle Jinja2 TemplateNotFound exceptions."""
    current_app.logger.error(f'Template not found: {error.name}')
    current_app.logger.error(f'Search Paths: {current_app.jinja_loader.list_templates()}')
    current_app.logger.error(traceback.format_exc())
    return render_template('error/500.html',
                           error_type="Template Not Found",
                           error_details=f"Template '{error.name}' could not be found."), 404


def template_syntax_error(error):
    """Handle Jinja2 TemplateSyntaxError exceptions."""
    current_app.logger.error(f'Template syntax error: {error.message}')
    current_app.logger.error(f'In template {error.filename}, line {error.lineno}')
    current_app.logger.error(traceback.format_exc())
    return render_template('error/500.html',
                           error_type="Template Syntax Error",
                           error_details=f"Error in template '{error.filename}' at line {error.lineno}: {error.message}"), 500


def register_error_handlers(app, profile: str = 'web_app'):
    # Store profile in app config to drive handler behavior
    app.config['ERRORS_PROFILE'] = profile

    app.register_error_handler(404, not_found_error)
    app.register_error_handler(500, internal_server_error)
    app.register_error_handler(401, not_authorised_error)
    app.register_error_handler(403, not_authorised_error)
    app.register_error_handler(EveAINoSessionTenant, no_tenant_selected_error)
    app.register_error_handler(KeyError, key_error_handler)
    app.register_error_handler(AttributeError, attribute_error_handler)
    app.register_error_handler(jinja2.TemplateNotFound, template_not_found_error)
    app.register_error_handler(jinja2.TemplateSyntaxError, template_syntax_error)
    app.register_error_handler(Exception, general_exception)
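A minimal wiring sketch for the new module, assuming the app-factory pattern; `'chat_client'` as the alternative profile value is taken from the handlers above:

```python
from flask import Flask
from common.utils.errors import register_error_handlers

def create_app(profile='web_app'):
    app = Flask(__name__)
    # Stores the profile in app.config['ERRORS_PROFILE'] and registers all handlers.
    register_error_handlers(app, profile=profile)
    return app

app = create_app('chat_client')
```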
@@ -34,7 +34,25 @@ class EveAIDoubleURLException(EveAIException):
class EveAIUnsupportedFileType(EveAIException):
    """Raised when an invalid file type is provided"""

-    def __init__(self, message="Filetype is not supported", status_code=400, payload=None):
+    def __init__(self, message="Filetype is not supported by current active processors", status_code=400, payload=None):
        super().__init__(message, status_code, payload)


class EveAINoProcessorFound(EveAIException):
    """Raised when no processor is found for a given file type"""

    def __init__(self, catalog_id, file_type, file_subtype, status_code=400, payload=None):
        message = f"No active processor found for catalog {catalog_id} with file type {file_type} and subtype {file_subtype}"
        super().__init__(message, status_code, payload)


class EveAINoContentFound(EveAIException):
    """Raised when no content is found for a given document"""

    def __init__(self, document_id, document_version_id, status_code=400, payload=None):
        self.document_id = document_id
        self.document_version_id = document_version_id
        message = f"No content found while processing Document with ID {document_id} and version {document_version_id}."
        super().__init__(message, status_code, payload)


@@ -248,3 +266,14 @@ class EveAIPendingLicensePeriod(EveAIException):
        message = f"Basic Fee Payment has not been received yet. Please ensure payment has been made, and please wait for payment to be processed."
        super().__init__(message, status_code, payload)


class EveAISpecialistExecutionError(EveAIException):
    """Raised when an error occurs during specialist execution"""

    def __init__(self, tenant_id, specialist_id, session_id, details, status_code=400, payload=None):
        message = (f"Error during specialist {specialist_id} execution \n"
                   f"with Session ID {session_id} \n"
                   f"for Tenant {tenant_id}. \n"
                   f"Details: {details} \n"
                   f"The System Administrator has been notified. Please try again later.")
        super().__init__(message, status_code, payload)

@@ -4,42 +4,67 @@ from typing import Generator
from redis import Redis, RedisError
import json
from flask import current_app
import time


class ExecutionProgressTracker:
    """Tracks progress of specialist executions using Redis"""

    # Normalized processing types and aliases
    PT_COMPLETE = 'EVEAI_COMPLETE'
    PT_ERROR = 'EVEAI_ERROR'

    _COMPLETE_ALIASES = {'EveAI Specialist Complete', 'Task Complete', 'task complete'}
    _ERROR_ALIASES = {'EveAI Specialist Error', 'Task Error', 'task error'}

    def __init__(self):
        try:
-            redis_url = current_app.config['SPECIALIST_EXEC_PUBSUB']
-
-            self.redis = Redis.from_url(redis_url, socket_timeout=5)
-            # Test the connection
-            self.redis.ping()
+            # Use shared pubsub pool (lazy connect; no eager ping)
+            from common.utils.redis_pubsub_pool import get_pubsub_client
+            self.redis = get_pubsub_client(current_app)
            self.expiry = 3600  # 1 hour expiry
        except RedisError as e:
            current_app.logger.error(f"Failed to connect to Redis: {str(e)}")
            raise
        except Exception as e:
-            current_app.logger.error(f"Unexpected error during Redis initialization: {str(e)}")
+            current_app.logger.error(f"Error initializing ExecutionProgressTracker: {str(e)}")
            raise

    def _get_key(self, execution_id: str) -> str:
-        return f"specialist_execution:{execution_id}"
+        prefix = current_app.config.get('REDIS_PREFIXES', {}).get('pubsub_execution', 'pubsub:execution:')
+        return f"{prefix}{execution_id}"

    def _retry(self, op, attempts: int = 3, base_delay: float = 0.1):
        """Retry wrapper for Redis operations with exponential backoff."""
        last_exc = None
        for i in range(attempts):
            try:
                return op()
            except RedisError as e:
                last_exc = e
                if i == attempts - 1:
                    break
                delay = base_delay * (3 ** i)  # 0.1, 0.3, 0.9
                current_app.logger.warning(f"Redis operation failed (attempt {i+1}/{attempts}): {e}. Retrying in {delay}s")
                time.sleep(delay)
        # Exhausted retries
        raise last_exc

    def _normalize_processing_type(self, processing_type: str) -> str:
        if not processing_type:
            return processing_type
        p = str(processing_type).strip()
        if p in self._COMPLETE_ALIASES:
            return self.PT_COMPLETE
        if p in self._ERROR_ALIASES:
            return self.PT_ERROR
        return p

    def send_update(self, ctask_id: str, processing_type: str, data: dict):
        """Send an update about execution progress"""
        try:
            current_app.logger.debug(f"Sending update for {ctask_id} with processing type {processing_type} and data:\n"
                                     f"{data}")
            key = self._get_key(ctask_id)

            # First verify Redis is still connected
            try:
                self.redis.ping()
            except RedisError:
                current_app.logger.error("Lost Redis connection. Attempting to reconnect...")
                self.__init__()  # Reinitialize connection

            processing_type = self._normalize_processing_type(processing_type)
            update = {
                'processing_type': processing_type,
                'data': data,
@@ -48,7 +73,7 @@ class ExecutionProgressTracker:

            # Log initial state
            try:
-                orig_len = self.redis.llen(key)
+                orig_len = self._retry(lambda: self.redis.llen(key))

                # Try to serialize the update and check the result
                try:
|
||||
raise
|
||||
|
||||
# Store update in list with pipeline for atomicity
|
||||
with self.redis.pipeline() as pipe:
|
||||
pipe.rpush(key, serialized_update)
|
||||
pipe.publish(key, serialized_update)
|
||||
pipe.expire(key, self.expiry)
|
||||
results = pipe.execute()
|
||||
def _pipeline_op():
|
||||
with self.redis.pipeline() as pipe:
|
||||
pipe.rpush(key, serialized_update)
|
||||
pipe.publish(key, serialized_update)
|
||||
pipe.expire(key, self.expiry)
|
||||
return pipe.execute()
|
||||
|
||||
new_len = self.redis.llen(key)
|
||||
results = self._retry(_pipeline_op)
|
||||
|
||||
new_len = self._retry(lambda: self.redis.llen(key))
|
||||
|
||||
if new_len <= orig_len:
|
||||
current_app.logger.error(
|
||||
@@ -81,32 +109,51 @@ class ExecutionProgressTracker:
    def get_updates(self, ctask_id: str) -> Generator[str, None, None]:
        key = self._get_key(ctask_id)
        pubsub = self.redis.pubsub()
-        pubsub.subscribe(key)
+        # Subscribe with retry
+        self._retry(lambda: pubsub.subscribe(key))

        try:
+            # Hint client reconnect interval (optional but helpful)
+            yield "retry: 3000\n\n"
+
            # First yield any existing updates
-            length = self.redis.llen(key)
+            length = self._retry(lambda: self.redis.llen(key))
            if length > 0:
-                updates = self.redis.lrange(key, 0, -1)
+                updates = self._retry(lambda: self.redis.lrange(key, 0, -1))
                for update in updates:
                    update_data = json.loads(update.decode('utf-8'))
                    # Use processing_type for the event
                    yield f"event: {update_data['processing_type']}\n"
+                    update_data['processing_type'] = self._normalize_processing_type(update_data.get('processing_type'))
                    yield f"data: {json.dumps(update_data)}\n\n"

            # Then listen for new updates
            while True:
-                message = pubsub.get_message(timeout=30)  # message['type'] is Redis pub/sub type
+                try:
+                    message = pubsub.get_message(timeout=30)  # message['type'] is Redis pub/sub type
+                except RedisError as e:
+                    current_app.logger.warning(f"Redis pubsub get_message error: {e}. Continuing...")
+                    time.sleep(0.3)
+                    continue

                if message is None:
                    yield ": keepalive\n\n"
                    continue

                if message['type'] == 'message':  # This is Redis pub/sub type
                    update_data = json.loads(message['data'].decode('utf-8'))
-                    yield f"data: {message['data'].decode('utf-8')}\n\n"
+                    update_data['processing_type'] = self._normalize_processing_type(update_data.get('processing_type'))
+                    yield f"data: {json.dumps(update_data)}\n\n"

-                    # Check processing_type for completion
-                    if update_data['processing_type'] in ['Task Complete', 'Task Error']:
+                    # Unified completion check
+                    if update_data['processing_type'] in [self.PT_COMPLETE, self.PT_ERROR]:
+                        # Give proxies/clients a chance to flush
+                        yield ": closing\n\n"
                        break
        finally:
-            pubsub.unsubscribe()
+            try:
+                pubsub.unsubscribe()
+            except Exception:
+                pass
+            try:
+                pubsub.close()
+            except Exception:
+                pass

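A hedged sketch of how `get_updates` would typically be exposed as a Server-Sent Events endpoint (the route and `app` object are illustrative, not taken from the repo):

```python
from flask import Response

@app.route('/executions/<ctask_id>/events')  # hypothetical route
def execution_events(ctask_id):
    tracker = ExecutionProgressTracker()
    return Response(
        tracker.get_updates(ctask_id),
        mimetype='text/event-stream',
        headers={'Cache-Control': 'no-cache', 'X-Accel-Buffering': 'no'},
    )
```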
@@ -1,54 +0,0 @@
from flask import request, render_template, abort
from sqlalchemy import desc, asc


class FilteredListView:
    def __init__(self, model, template, per_page=10):
        self.model = model
        self.template = template
        self.per_page = per_page

    def get_query(self):
        return self.model.query

    def apply_filters(self, query):
        filters = request.args.get('filters', {})
        for key, value in filters.items():
            if hasattr(self.model, key):
                column = getattr(self.model, key)
                if value.startswith('like:'):
                    query = query.filter(column.like(f"%{value[5:]}%"))
                else:
                    query = query.filter(column == value)
        return query

    def apply_sorting(self, query):
        sort_by = request.args.get('sort_by')
        if sort_by and hasattr(self.model, sort_by):
            sort_order = request.args.get('sort_order', 'asc')
            column = getattr(self.model, sort_by)
            if sort_order == 'desc':
                query = query.order_by(desc(column))
            else:
                query = query.order_by(asc(column))
        return query

    def paginate(self, query):
        page = request.args.get('page', 1, type=int)
        return query.paginate(page=page, per_page=self.per_page, error_out=False)

    def get(self):
        query = self.get_query()
        query = self.apply_filters(query)
        query = self.apply_sorting(query)
        pagination = self.paginate(query)

        context = {
            'items': pagination.items,
            'pagination': pagination,
            'model': self.model.__name__,
            'filters': request.args.get('filters', {}),
            'sort_by': request.args.get('sort_by'),
            'sort_order': request.args.get('sort_order', 'asc')
        }
        return render_template(self.template, **context)
@@ -6,22 +6,17 @@ from flask import current_app


def send_email(to_email, to_name, subject, html):
-    current_app.logger.debug(f"Sending email to {to_email} with subject {subject}")
    access_key = current_app.config['SW_EMAIL_ACCESS_KEY']
    secret_key = current_app.config['SW_EMAIL_SECRET_KEY']
    default_project_id = current_app.config['SW_PROJECT']
    default_region = "fr-par"
-    current_app.logger.debug(f"Access Key: {access_key}\nSecret Key: {secret_key}\n"
-                             f"Default Project ID: {default_project_id}\nDefault Region: {default_region}")
    client = Client(
        access_key=access_key,
        secret_key=secret_key,
        default_project_id=default_project_id,
        default_region=default_region
    )
-    current_app.logger.debug(f"Scaleway Client Initialized")
    tem = TemV1Alpha1API(client)
-    current_app.logger.debug(f"Tem Initialized")
    from_ = CreateEmailRequestAddress(email=current_app.config['SW_EMAIL_SENDER'],
                                      name=current_app.config['SW_EMAIL_NAME'])
    to_ = CreateEmailRequestAddress(email=to_email, name=to_name)
@@ -34,7 +29,6 @@ def send_email(to_email, to_name, subject, html):
        html=html,
        project_id=default_project_id,
    )
-    current_app.logger.debug(f"Email sent to {to_email}")


def html_to_text(html_content):

@@ -1,14 +1,18 @@
from minio import Minio
from minio.error import S3Error
-from flask import Flask
+from flask import Flask, current_app
import io
from werkzeug.datastructures import FileStorage

MIB_CONVERTOR = 1_048_576


class MinioClient:
    def __init__(self):
        self.client = None

    def init_app(self, app: Flask):
        app.logger.debug(f"Initializing MinIO client with endpoint: {app.config['MINIO_ENDPOINT']} and secure: {app.config.get('MINIO_USE_HTTPS', False)}")
        self.client = Minio(
            app.config['MINIO_ENDPOINT'],
            access_key=app.config['MINIO_ACCESS_KEY'],
@@ -18,27 +22,51 @@ class MinioClient:
        app.logger.info(f"MinIO client initialized with endpoint: {app.config['MINIO_ENDPOINT']}")

    def generate_bucket_name(self, tenant_id):
-        return f"tenant-{tenant_id}-bucket"
+        tenant_base = current_app.config.get('OBJECT_STORAGE_TENANT_BASE', 'bucket')
+        if tenant_base == 'bucket':
+            return f"tenant-{tenant_id}-bucket"
+        elif tenant_base == 'folder':
+            return current_app.config.get('OBJECT_STORAGE_BUCKET_NAME')
+        else:
+            raise ValueError(f"Invalid OBJECT_STORAGE_TENANT_BASE value: {tenant_base}")

    def create_tenant_bucket(self, tenant_id):
-        bucket_name = self.generate_bucket_name(tenant_id)
-        try:
-            if not self.client.bucket_exists(bucket_name):
-                self.client.make_bucket(bucket_name)
-            return bucket_name
-        except S3Error as err:
-            raise Exception(f"Error occurred while creating bucket: {err}")
+        tenant_base = current_app.config.get('OBJECT_STORAGE_TENANT_BASE', 'bucket')
+        if tenant_base == 'bucket':
+            bucket_name = self.generate_bucket_name(tenant_id)
+            try:
+                if not self.client.bucket_exists(bucket_name):
+                    self.client.make_bucket(bucket_name)
+                    return bucket_name
+                return bucket_name
+            except S3Error as err:
+                raise Exception(f"Error occurred while creating bucket: {err}")
+        elif tenant_base == 'folder':  # In this case, we are working within a predefined bucket
+            return current_app.config.get('OBJECT_STORAGE_BUCKET_NAME')
+        else:
+            raise ValueError(f"Invalid OBJECT_STORAGE_TENANT_BASE value: {tenant_base}")

-    def generate_object_name(self, document_id, language, version_id, filename):
-        return f"{document_id}/{language}/{version_id}/{filename}"
+    def generate_object_name(self, tenant_id, document_id, language, version_id, filename):
+        tenant_base = current_app.config.get('OBJECT_STORAGE_TENANT_BASE', 'bucket')
+        if tenant_base == 'bucket':
+            return f"{document_id}/{language}/{version_id}/{filename}"
+        elif tenant_base == 'folder':
+            return f"tenant-{tenant_id}/documents/{document_id}/{language}/{version_id}/{filename}"
+        else:
+            raise ValueError(f"Invalid OBJECT_STORAGE_TENANT_BASE value: {tenant_base}")

-    def generate_asset_name(self, asset_version_id, file_name, content_type):
-        return f"assets/{asset_version_id}/{file_name}.{content_type}"
+    def generate_asset_name(self, tenant_id, asset_id, asset_type, content_type):
+        tenant_base = current_app.config.get('OBJECT_STORAGE_TENANT_BASE', 'bucket')
+        if tenant_base == 'bucket':
+            return f"assets/{asset_type}/{asset_id}.{content_type}"
+        elif tenant_base == 'folder':
+            return f"tenant-{tenant_id}/assets/{asset_type}/{asset_id}.{content_type}"
+        else:
+            raise ValueError(f"Invalid OBJECT_STORAGE_TENANT_BASE value: {tenant_base}")

    def upload_document_file(self, tenant_id, document_id, language, version_id, filename, file_data):
        bucket_name = self.generate_bucket_name(tenant_id)
-        object_name = self.generate_object_name(document_id, language, version_id, filename)
+        object_name = self.generate_object_name(tenant_id, document_id, language, version_id, filename)

        try:
            if isinstance(file_data, FileStorage):
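The two layouts the `OBJECT_STORAGE_TENANT_BASE` switch produces, shown with invented IDs:

```python
def object_name(tenant_base, tenant_id, document_id, language, version_id, filename):
    # Mirrors generate_object_name above, minus the Flask config lookup.
    if tenant_base == 'bucket':
        return f"{document_id}/{language}/{version_id}/{filename}"
    return f"tenant-{tenant_id}/documents/{document_id}/{language}/{version_id}/{filename}"

print(object_name('bucket', 7, 12, 'en', 3, 'report.pdf'))  # 12/en/3/report.pdf
print(object_name('folder', 7, 12, 'en', 3, 'report.pdf'))  # tenant-7/documents/12/en/3/report.pdf
```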
@@ -57,8 +85,10 @@ class MinioClient:
        except S3Error as err:
            raise Exception(f"Error occurred while uploading file: {err}")

-    def upload_asset_file(self, bucket_name, asset_version_id, file_name, file_type, file_data):
-        object_name = self.generate_asset_name(asset_version_id, file_name, file_type)
+    def upload_asset_file(self, tenant_id: int, asset_id: int, asset_type: str, file_type: str,
+                          file_data: bytes | FileStorage | io.BytesIO | str) -> tuple[str, str, int]:
+        bucket_name = self.generate_bucket_name(tenant_id)
+        object_name = self.generate_asset_name(tenant_id, asset_id, asset_type, file_type)

        try:
            if isinstance(file_data, FileStorage):
@@ -73,7 +103,7 @@ class MinioClient:
            self.client.put_object(
                bucket_name, object_name, io.BytesIO(file_data), len(file_data)
            )
-            return object_name, len(file_data)
+            return bucket_name, object_name, len(file_data)
        except S3Error as err:
            raise Exception(f"Error occurred while uploading asset: {err}")

@@ -84,6 +114,13 @@ class MinioClient:
        except S3Error as err:
            raise Exception(f"Error occurred while downloading file: {err}")

+    def download_asset_file(self, tenant_id, bucket_name, object_name):
+        try:
+            response = self.client.get_object(bucket_name, object_name)
+            return response.read()
+        except S3Error as err:
+            raise Exception(f"Error occurred while downloading asset: {err}")

    def list_document_files(self, tenant_id, document_id, language=None, version_id=None):
        bucket_name = self.generate_bucket_name(tenant_id)
        prefix = f"{document_id}/"

@@ -99,9 +136,22 @@ class MinioClient:

    def delete_document_file(self, tenant_id, document_id, language, version_id, filename):
        bucket_name = self.generate_bucket_name(tenant_id)
-        object_name = self.generate_object_name(document_id, language, version_id, filename)
+        object_name = self.generate_object_name(tenant_id, document_id, language, version_id, filename)
        try:
            self.client.remove_object(bucket_name, object_name)
            return True
        except S3Error as err:
            raise Exception(f"Error occurred while deleting file: {err}")

+    def delete_object(self, bucket_name, object_name):
+        try:
+            self.client.remove_object(bucket_name, object_name)
+        except S3Error as err:
+            raise Exception(f"Error occurred while deleting object: {err}")
+
+    def get_bucket_size(self, tenant_id: int) -> int:
+        bucket_name = self.generate_bucket_name(tenant_id)
+        total_size = 0
+        for obj in self.client.list_objects(bucket_name, recursive=True):
+            total_size += obj.size
+        return total_size

@@ -6,7 +6,6 @@ from langchain_core.language_models import BaseChatModel

from common.langchain.llm_metrics_handler import LLMMetricsHandler
from langchain_openai import ChatOpenAI
from langchain_anthropic import ChatAnthropic
-from langchain_mistralai import ChatMistralAI
from flask import current_app

@@ -56,7 +55,9 @@ def replace_variable_in_template(template: str, variable: str, value: str) -> st
    Returns:
        str: Template with variable placeholder replaced
    """
-    return template.replace(variable, value or "")
+    modified_template = template.replace(f"{{{variable}}}", value or "")
+    return modified_template


def get_embedding_model_and_class(tenant_id, catalog_id, full_embedding_name="mistral.mistral-embed"):

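The behaviour change in `replace_variable_in_template`: callers now pass the bare variable name and the function adds the braces itself. Illustration:

```python
template = "Hello {name}, welcome!"
variable, value = "name", "Eve"
# old: template.replace(variable, ...) required the caller to include the braces
# new: f"{{{variable}}}" expands to "{name}", so braces are added inside the function
print(template.replace(f"{{{variable}}}", value or ""))  # Hello Eve, welcome!
```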
@@ -1,18 +1,108 @@
-from flask import request, current_app, url_for
+from flask import request, url_for, current_app
from urllib.parse import urlsplit, urlunsplit
import re

VISIBLE_PREFIXES = ('/admin', '/api', '/chat-client')


def _normalize_prefix(raw_prefix: str) -> str:
    """Normalize config prefix to internal form '/admin' or '' if not set."""
    if not raw_prefix:
        return ''
    s = str(raw_prefix).strip()
    if not s:
        return ''
    # remove leading/trailing slashes, then add single leading slash
    s = s.strip('/')
    if not s:
        return ''
    return f"/{s}"


def _get_config_prefix() -> str:
    """Return normalized prefix from config EVEAI_APP_PREFIX (config-first)."""
    try:
        cfg_val = (current_app.config.get('EVEAI_APP_PREFIX') if current_app else None)
        return _normalize_prefix(cfg_val)
    except Exception:
        return ''


def _derive_visible_prefix():
    # 1) Edge-provided header (the best and most explicit source)
    xfp = request.headers.get('X-Forwarded-Prefix')
    current_app.logger.debug(f"X-Forwarded-Prefix: {xfp}")
    if xfp and any(str(xfp).startswith(p) for p in VISIBLE_PREFIXES):
        return str(xfp).rstrip('/')

    # 2) Referer fallback: take the top-level segment from the Referer path
    ref = request.headers.get('Referer') or ''
    try:
        ref_path = urlsplit(ref).path or ''
        m = re.match(r'^/(admin|api|chat-client)(?:\b|/)', ref_path)
        if m:
            return f"/{m.group(1)}"
    except Exception:
        pass

    # 3) No prefix known
    return ''


def _visible_prefix_for_runtime() -> str:
    """Decide which prefix to use at runtime.
    Priority: config EVEAI_APP_PREFIX; optional dynamic fallback if enabled.
    """
    cfg_prefix = _get_config_prefix()
    if cfg_prefix:
        current_app.logger.debug(f"prefixed_url_for: using config prefix: {cfg_prefix}")
        return cfg_prefix
    # Optional dynamic fallback
    use_fallback = bool(current_app.config.get('EVEAI_USE_DYNAMIC_PREFIX_FALLBACK', False)) if current_app else False
    if use_fallback:
        dyn = _derive_visible_prefix()
        current_app.logger.debug(f"prefixed_url_for: using dynamic fallback prefix: {dyn}")
        return dyn
    current_app.logger.debug("prefixed_url_for: no prefix configured, no fallback enabled")
    return ''


def prefixed_url_for(endpoint, **values):
-    prefix = request.headers.get('X-Forwarded-Prefix', '')
-    scheme = request.headers.get('X-Forwarded-Proto', request.scheme)
-    host = request.headers.get('Host', request.host)
-
    """
    Behaviour:
    - Default (_external=False, for_redirect=False): return a relative path (without a leading '/')
      for templates/JS. The dynamic <base> takes care of correct resolution under the visible prefix.
    - _external=True: build an absolute URL (scheme/host). The path is prefixed with the config prefix (if set),
      or optionally with the dynamic fallback when enabled.
    - for_redirect=True: return a root-absolute path including the visible top-level prefix, suitable
      for HTTP Location headers. Backwards compat: _as_location=True is treated as for_redirect.
    """
    external = values.pop('_external', False)
-    generated_url = url_for(endpoint, **values)
    # Backwards compatibility with the older parameter name
    if values.pop('_as_location', False):
        values['for_redirect'] = True
    for_redirect = values.pop('for_redirect', False)

    generated_url = url_for(endpoint, **values)  # e.g. "/user/tenant_overview"
    path, query, fragment = urlsplit(generated_url)[2:5]

    if external:
-        path, query, fragment = urlsplit(generated_url)[2:5]
-        new_path = prefix + path
        scheme = request.headers.get('X-Forwarded-Proto', request.scheme)
        host = request.headers.get('Host', request.host)
        visible_prefix = _visible_prefix_for_runtime()
        new_path = (visible_prefix.rstrip('/') + path) if (visible_prefix and not path.startswith(visible_prefix)) else path
        current_app.logger.debug(f"prefixed_url_for external: {scheme}://{host}{new_path}")
        return urlunsplit((scheme, host, new_path, query, fragment))
-    else:
-        return prefix + generated_url

    if for_redirect:
        visible_prefix = _visible_prefix_for_runtime()
        if visible_prefix and not path.startswith(visible_prefix):
            composed = f"{visible_prefix}{path}"
            current_app.logger.debug(f"prefixed_url_for redirect: {composed}")
            return composed
        current_app.logger.debug(f"prefixed_url_for redirect (no prefix): {path}")
        return path

    # Default: relative path (without a leading '/')
    rel = path[1:] if path.startswith('/') else path
    return rel
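Illustrative calls inside a request context (the endpoint name is hypothetical), assuming `EVEAI_APP_PREFIX = '/admin'`:

```python
prefixed_url_for('user_bp.tenant_overview')                     # 'user/tenant_overview' (relative, for <base>)
prefixed_url_for('user_bp.tenant_overview', for_redirect=True)  # '/admin/user/tenant_overview' (Location header)
prefixed_url_for('user_bp.tenant_overview', _external=True)     # 'https://host/admin/user/tenant_overview'
```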
84 common/utils/redis_pubsub_pool.py Normal file
@@ -0,0 +1,84 @@
import ssl
from typing import Dict, Any

import redis
from flask import Flask


def _build_pubsub_redis_config(app: Flask) -> Dict[str, Any]:
    """Build Redis ConnectionPool config for the pubsub/EPT workload using app.config.

    Does not modify cache or session pools.
    """
    cfg = app.config

    config: Dict[str, Any] = {
        'host': cfg['REDIS_URL'],
        'port': cfg['REDIS_PORT'],
        'db': int(cfg.get('REDIS_SPECIALIST_EXEC_DB', '0')),
        'max_connections': int(cfg.get('REDIS_PUBSUB_MAX_CONNECTIONS', 200)),
        'retry_on_timeout': True,
        'socket_keepalive': True,
        'socket_keepalive_options': {},
        'socket_timeout': float(cfg.get('REDIS_PUBSUB_SOCKET_TIMEOUT', 10.0)),
        'socket_connect_timeout': float(cfg.get('REDIS_PUBSUB_CONNECT_TIMEOUT', 3.0)),
    }

    # Authentication if present
    un = cfg.get('REDIS_USER')
    pw = cfg.get('REDIS_PASS')
    if un and pw:
        config.update({'username': un, 'password': pw})

    # TLS when configured
    cert_path = cfg.get('REDIS_CA_CERT_PATH')
    if cfg.get('REDIS_SCHEME') == 'rediss' and cert_path:
        config.update({
            'connection_class': redis.SSLConnection,
            'ssl_cert_reqs': ssl.CERT_REQUIRED,
            'ssl_check_hostname': cfg.get('REDIS_SSL_CHECK_HOSTNAME', True),
            'ssl_ca_certs': cert_path,
        })

    return config


def create_pubsub_pool(app: Flask) -> redis.ConnectionPool:
    """Create and store the dedicated pubsub ConnectionPool in app.extensions."""
    if not hasattr(app, 'extensions'):
        app.extensions = {}

    # Reuse existing if already created
    pool = app.extensions.get('redis_pubsub_pool')
    if pool is not None:
        return pool

    config = _build_pubsub_redis_config(app)
    pool = redis.ConnectionPool(**config)
    app.extensions['redis_pubsub_pool'] = pool

    # Log a concise, non-sensitive summary
    try:
        summary = {
            'scheme': app.config.get('REDIS_SCHEME'),
            'host': app.config.get('REDIS_URL'),
            'port': app.config.get('REDIS_PORT'),
            'db': app.config.get('REDIS_SPECIALIST_EXEC_DB', '0'),
            'ssl_check_hostname': app.config.get('REDIS_SSL_CHECK_HOSTNAME'),
            'ca_present': bool(app.config.get('REDIS_CA_CERT_PATH')),
            'max_connections': app.config.get('REDIS_PUBSUB_MAX_CONNECTIONS'),
            'socket_timeout': app.config.get('REDIS_PUBSUB_SOCKET_TIMEOUT'),
            'socket_connect_timeout': app.config.get('REDIS_PUBSUB_CONNECT_TIMEOUT'),
        }
        app.logger.info(f"Initialized Redis pubsub pool: {summary}")
    except Exception:
        pass

    return pool


def get_pubsub_client(app: Flask) -> redis.Redis:
    """Get a Redis client bound to the dedicated pubsub pool."""
    pool = app.extensions.get('redis_pubsub_pool')
    if pool is None:
        pool = create_pubsub_pool(app)
    return redis.Redis(connection_pool=pool)
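A hedged usage sketch; the two config keys set below are the only ones `_build_pubsub_redis_config` requires, the rest fall back to defaults:

```python
from flask import Flask
from common.utils.redis_pubsub_pool import create_pubsub_pool, get_pubsub_client

app = Flask(__name__)
app.config.update(REDIS_URL='localhost', REDIS_PORT=6379)  # minimal assumed config

create_pubsub_pool(app)          # idempotent; pool is stored in app.extensions
client = get_pubsub_client(app)  # Redis client bound to the shared pool
client.publish('pubsub:execution:demo', '{"processing_type": "EVEAI_COMPLETE"}')
```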
@@ -6,13 +6,13 @@ from common.models.entitlements import License
from common.utils.database import Database
from common.utils.eveai_exceptions import EveAITenantNotFound, EveAITenantInvalid, EveAINoActiveLicense
from datetime import datetime as dt, timezone as tz
from common.services.user import TenantServices


# Definition of Trigger Handlers
def set_tenant_session_data(sender, user, **kwargs):
    tenant = Tenant.query.filter_by(id=user.tenant_id).first()
    session['tenant'] = tenant.to_dict()
    session['default_language'] = tenant.default_language
    partner = Partner.query.filter_by(tenant_id=user.tenant_id).first()
    if partner:
        session['partner'] = partner.to_dict()
@@ -20,19 +20,23 @@ def set_tenant_session_data(sender, user, **kwargs):
    # Remove partner from session if it exists
    session.pop('partner', None)

    session['consent_status'] = str(TenantServices.get_consent_status(user.tenant_id))


def clear_tenant_session_data(sender, user, **kwargs):
    session.pop('tenant', None)
    session.pop('default_language', None)
    session.pop('default_llm_model', None)
    session.pop('partner', None)
    session.pop('consent_status', None)


def is_valid_tenant(tenant_id):
    if tenant_id == 1:  # The 'root' tenant is always valid
        return True
    tenant = Tenant.query.get(tenant_id)
-    Database(tenant).switch_schema()
+    # Use the tenant_id (schema name), not the Tenant object, to switch schema
+    Database(tenant_id).switch_schema()
    if tenant is None:
        raise EveAITenantNotFound()
    elif tenant.type == 'Inactive':

@@ -1,8 +1,8 @@
-from flask import current_app, render_template
+from flask import current_app, render_template, request, redirect, session, flash
from flask_security import current_user
from itsdangerous import URLSafeTimedSerializer

-from common.models.user import Role
+from common.models.user import Role, ConsentStatus
from common.utils.nginx_utils import prefixed_url_for
from common.utils.mail_utils import send_email

@@ -36,7 +36,7 @@ def send_confirmation_email(user):

    try:
        send_email(user.email, f"{user.first_name} {user.last_name}", "Confirm your email", html)
-        current_app.logger.info(f'Confirmation email sent to {user.email}')
+        current_app.logger.info(f'Confirmation email sent to {user.email} with url: {confirm_url}')
    except Exception as e:
        current_app.logger.error(f'Failed to send confirmation email to {user.email}. Error: {str(e)}')
        raise
@@ -51,7 +51,7 @@ def send_reset_email(user):

    try:
        send_email(user.email, f"{user.first_name} {user.last_name}", subject, html)
-        current_app.logger.info(f'Reset email sent to {user.email}')
+        current_app.logger.info(f'Reset email sent to {user.email} with url: {reset_url}')
    except Exception as e:
        current_app.logger.error(f'Failed to send reset email to {user.email}. Error: {str(e)}')
        raise
@@ -96,3 +96,101 @@ def current_user_roles():

def all_user_roles():
    roles = [(role.id, role.name) for role in Role.query.all()]


def is_exempt_endpoint(endpoint: str) -> bool:
    """Check if the endpoint is exempt from the consent guard"""
    if not endpoint:
        return False
    cfg = current_app.config or {}
    endpoints_cfg = set(cfg.get('CONSENT_GUARD_EXEMPT_ENDPOINTS', []))
    prefix_cfg = list(cfg.get('CONSENT_GUARD_EXEMPT_PREFIXES', []))

    default_endpoints = {
        'security_bp.login',
        'security_bp.logout',
        'security_bp.confirm_email',
        'security_bp.forgot_password',
        'security_bp.reset_password',
        'security_bp.reset_password_request',
        'user_bp.tenant_consent',
        'user_bp.no_consent',
        'user_bp.tenant_consent_renewal',
        'user_bp.consent_renewal',
        'user_bp.view_tenant_consents',
        'user_bp.accept_tenant_consent',
        'user_bp.view_consent_markdown',
        'basic_bp.view_content',
    }
    default_prefixes = [
        'security_bp.',
        'healthz_bp.',
    ]
    endpoints = default_endpoints.union(endpoints_cfg)
    prefixes = default_prefixes + [p for p in prefix_cfg if isinstance(p, str)]
    for p in prefixes:
        if endpoint.startswith(p):
            return True
    if endpoint in endpoints:
        return True
    return False


def enforce_tenant_consent_ui():
    """Check if the user has consented to the terms of service"""
    path = getattr(request, 'path', '') or ''
    if path.startswith('/healthz') or path.startswith('/_healthz'):
        return None

    if not current_user.is_authenticated:
        return None

    endpoint = request.endpoint or ''
    if is_exempt_endpoint(endpoint) or request.method == 'OPTIONS':
        return None

    # Global bypass: Super User and Partner Admin are always allowed
    if current_user.has_roles('Super User') or current_user.has_roles('Partner Admin'):
        return None

    tenant_id = getattr(current_user, 'tenant_id', None)
    if not tenant_id:
        tenant_id = session.get('tenant', {}).get('id') if session.get('tenant') else None
    if not tenant_id:
        return redirect(prefixed_url_for('security_bp.login', for_redirect=True))

    raw_status = session.get('consent_status', ConsentStatus.NOT_CONSENTED)
    # Coerce string to ConsentStatus enum if needed
    status = raw_status
    try:
        if isinstance(raw_status, str):
            # Accept formats like 'CONSENTED' or 'ConsentStatus.CONSENTED'
            name = raw_status.split('.')[-1]
            from common.models.user import ConsentStatus as CS
            status = getattr(CS, name, CS.NOT_CONSENTED)
    except Exception:
        status = ConsentStatus.NOT_CONSENTED

    if status == ConsentStatus.CONSENTED:
        return None

    if status == ConsentStatus.NOT_CONSENTED:
        if current_user.has_roles('Tenant Admin'):
            return redirect(prefixed_url_for('user_bp.tenant_consent', for_redirect=True))
        return redirect(prefixed_url_for('user_bp.no_consent', for_redirect=True))
    if status == ConsentStatus.RENEWAL_REQUIRED:
        if current_user.has_roles('Tenant Admin'):
            flash(
                "You need to renew your consent to our DPA or T&Cs. Failing to do so in time will stop you from accessing our services.",
                "danger")
        elif current_user.has_roles('Partner Admin'):
            flash(
                "Please ensure renewal of our DPA or T&Cs for the current Tenant. Failing to do so in time will stop the tenant from accessing our services.",
                "danger")
        else:
            flash(
                "Please inform your administrator or partner to renew your consent to our DPA or T&Cs. Failing to do so in time will stop you from accessing our services.",
                "danger")
        return None
    current_app.logger.debug('Unknown consent status')
|
||||
return redirect(prefixed_url_for('user_bp.no_consent', for_redirect=True))
|
||||
|
||||
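A minimal sketch of hooking the guard above into the request lifecycle; the registration function name is an assumption, but the before_request mechanics are standard Flask:

def register_consent_guard(app):
    @app.before_request
    def _consent_guard():
        # enforce_tenant_consent_ui() returns None to let the request through,
        # or a redirect response; Flask short-circuits the view on non-None.
        return enforce_tenant_consent_ui()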
@@ -1,196 +0,0 @@
from datetime import datetime as dt, timezone as tz
from typing import Optional, Dict, Any
from flask import current_app
from sqlalchemy.exc import SQLAlchemyError

from common.extensions import db, cache_manager
from common.models.interaction import (
    Specialist, EveAIAgent, EveAITask, EveAITool
)
from common.utils.model_logging_utils import set_logging_information, update_logging_information


def initialize_specialist(specialist_id: int, specialist_type: str, specialist_version: str):
    """
    Initialize an agentic specialist by creating all its components based on configuration.

    Args:
        specialist_id: ID of the specialist to initialize
        specialist_type: Type of the specialist
        specialist_version: Version of the specialist type to use

    Raises:
        ValueError: If specialist not found or invalid configuration
        SQLAlchemyError: If database operations fail
    """
    config = cache_manager.specialists_config_cache.get_config(specialist_type, specialist_version)
    if not config:
        raise ValueError(f"No configuration found for {specialist_type} version {specialist_version}")
    if config['framework'] == 'langchain':
        pass  # Langchain does not require additional items to be initialized. All configuration is in the specialist.

    specialist = Specialist.query.get(specialist_id)
    if not specialist:
        raise ValueError(f"Specialist with ID {specialist_id} not found")

    if config['framework'] == 'crewai':
        initialize_crewai_specialist(specialist, config)


def initialize_crewai_specialist(specialist: Specialist, config: Dict[str, Any]):
    timestamp = dt.now(tz=tz.utc)

    try:
        # Initialize agents
        if 'agents' in config:
            for agent_config in config['agents']:
                _create_agent(
                    specialist_id=specialist.id,
                    agent_type=agent_config['type'],
                    agent_version=agent_config['version'],
                    name=agent_config.get('name'),
                    description=agent_config.get('description'),
                    timestamp=timestamp
                )

        # Initialize tasks
        if 'tasks' in config:
            for task_config in config['tasks']:
                _create_task(
                    specialist_id=specialist.id,
                    task_type=task_config['type'],
                    task_version=task_config['version'],
                    name=task_config.get('name'),
                    description=task_config.get('description'),
                    timestamp=timestamp
                )

        # Initialize tools
        if 'tools' in config:
            for tool_config in config['tools']:
                _create_tool(
                    specialist_id=specialist.id,
                    tool_type=tool_config['type'],
                    tool_version=tool_config['version'],
                    name=tool_config.get('name'),
                    description=tool_config.get('description'),
                    timestamp=timestamp
                )

        db.session.commit()
        current_app.logger.info(f"Successfully initialized crewai specialist {specialist.id}")

    except SQLAlchemyError as e:
        db.session.rollback()
        current_app.logger.error(f"Database error initializing crewai specialist {specialist.id}: {str(e)}")
        raise
    except Exception as e:
        db.session.rollback()
        current_app.logger.error(f"Error initializing crewai specialist {specialist.id}: {str(e)}")
        raise


def _create_agent(
        specialist_id: int,
        agent_type: str,
        agent_version: str,
        name: Optional[str] = None,
        description: Optional[str] = None,
        timestamp: Optional[dt] = None
) -> EveAIAgent:
    """Create an agent with the given configuration."""
    if timestamp is None:
        timestamp = dt.now(tz=tz.utc)

    # Get agent configuration from cache
    agent_config = cache_manager.agents_config_cache.get_config(agent_type, agent_version)

    agent = EveAIAgent(
        specialist_id=specialist_id,
        name=name or agent_config.get('name', agent_type),
        description=description or agent_config.get('metadata').get('description', ''),
        type=agent_type,
        type_version=agent_version,
        role=None,
        goal=None,
        backstory=None,
        tuning=False,
        configuration=None,
        arguments=None
    )

    set_logging_information(agent, timestamp)

    db.session.add(agent)
    current_app.logger.info(f"Created agent {agent.id} of type {agent_type}")
    return agent


def _create_task(
        specialist_id: int,
        task_type: str,
        task_version: str,
        name: Optional[str] = None,
        description: Optional[str] = None,
        timestamp: Optional[dt] = None
) -> EveAITask:
    """Create a task with the given configuration."""
    if timestamp is None:
        timestamp = dt.now(tz=tz.utc)

    # Get task configuration from cache
    task_config = cache_manager.tasks_config_cache.get_config(task_type, task_version)

    task = EveAITask(
        specialist_id=specialist_id,
        name=name or task_config.get('name', task_type),
        description=description or task_config.get('metadata').get('description', ''),
        type=task_type,
        type_version=task_version,
        task_description=None,
        expected_output=None,
        tuning=False,
        configuration=None,
        arguments=None,
        context=None,
        asynchronous=False,
    )

    set_logging_information(task, timestamp)

    db.session.add(task)
    current_app.logger.info(f"Created task {task.id} of type {task_type}")
    return task


def _create_tool(
        specialist_id: int,
        tool_type: str,
        tool_version: str,
        name: Optional[str] = None,
        description: Optional[str] = None,
        timestamp: Optional[dt] = None
) -> EveAITool:
    """Create a tool with the given configuration."""
    if timestamp is None:
        timestamp = dt.now(tz=tz.utc)

    # Get tool configuration from cache
    tool_config = cache_manager.tools_config_cache.get_config(tool_type, tool_version)

    tool = EveAITool(
        specialist_id=specialist_id,
        name=name or tool_config.get('name', tool_type),
        description=description or tool_config.get('metadata').get('description', ''),
        type=tool_type,
        type_version=tool_version,
        tuning=False,
        configuration=None,
        arguments=None,
    )

    set_logging_information(tool, timestamp)

    db.session.add(tool)
    current_app.logger.info(f"Created tool {tool.id} of type {tool_type}")
    return tool
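For reference, a sketch of the configuration shape this (now removed) initializer consumed; the dict below is illustrative, not an actual specialist config from the repo:

# Hypothetical config as specialists_config_cache.get_config() might return it.
example_config = {
    'framework': 'crewai',
    'agents': [{'type': 'RAG_AGENT', 'version': '1.2.0'}],
    'tasks': [{'type': 'RAG_TASK', 'version': '1.0.0'}],  # task type is made up
    'tools': [],
}
# initialize_specialist() looked the config up by (type, version), then created
# one EveAIAgent/EveAITask/EveAITool row per entry and committed once at the end.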
@@ -6,7 +6,8 @@ from common.extensions import cache_manager


def perform_startup_actions(app):
-   perform_startup_invalidation(app)
+   pass
+   # perform_startup_invalidation(app)


def perform_startup_invalidation(app):
@@ -5,6 +5,7 @@ import markdown
 from markupsafe import Markup
+from datetime import datetime
 from common.utils.nginx_utils import prefixed_url_for as puf
 from common.utils.chat_utils import adjust_color_brightness, adjust_color_alpha, get_base_background_color
 from flask import current_app, url_for


@@ -98,7 +99,6 @@ def get_pagination_html(pagination, endpoint, **kwargs):
        if page:
            is_active = 'active' if page == pagination.page else ''
            url = url_for(endpoint, page=page, **kwargs)
-           current_app.logger.debug(f"URL for page {page}: {url}")
            html.append(f'<li class="page-item {is_active}"><a class="page-link" href="{url}">{page}</a></li>')
        else:
            html.append('<li class="page-item disabled"><span class="page-link">...</span></li>')
@@ -107,6 +107,44 @@ def get_pagination_html(pagination, endpoint, **kwargs):
    return Markup(''.join(html))
def asset_url(logical_path: str):
    """
    Resolve an asset logical path to a hashed URL using the Parcel manifest when available.
    Return a URL that respects STATIC_URL (CDN) when configured; otherwise serve from /static/.
    Examples:
    - asset_url('dist/chat-client.js') -> 'https://cdn/.../dist/chat-client.abc123.js' (when STATIC_URL set)
    - asset_url('dist/chat-client.css') -> '/static/dist/chat-client.def456.css' (when STATIC_URL not set)
    """
    if not logical_path:
        return logical_path
    try:
        from common.utils.asset_manifest import resolve_asset
        # Resolve the logical path to a possibly hashed path
        resolved = resolve_asset(logical_path) or logical_path

        # If the manifest returns an absolute URL, return it as-is
        if resolved.startswith('http://') or resolved.startswith('https://'):
            return resolved

        # Normalize: strip any leading '/static/' and leading '/'
        if resolved.startswith('/static/'):
            rel = resolved[len('/static/'):]
        else:
            rel = resolved.lstrip('/')

        # Build with STATIC_URL if configured
        static_base = (current_app.config.get('STATIC_URL') or '').rstrip('/')
        if static_base:
            return f"{static_base}/{rel}"
        # Fall back to the app's static folder
        return f"/static/{rel}"
    except Exception:
        # Conservative fallback, also respecting STATIC_URL
        static_base = (current_app.config.get('STATIC_URL') or '').rstrip('/')
        rel = logical_path.lstrip('/')
        return f"{static_base}/{rel}" if static_base else f"/static/{rel}"


def register_filters(app):
    """
    Registers custom filters with the Flask app.
@@ -117,7 +155,11 @@ def register_filters(app):
    app.jinja_env.filters['prefixed_url_for'] = prefixed_url_for
    app.jinja_env.filters['markdown'] = render_markdown
    app.jinja_env.filters['clean_markdown'] = clean_markdown
    app.jinja_env.filters['adjust_color_brightness'] = adjust_color_brightness
    app.jinja_env.filters['adjust_color_alpha'] = adjust_color_alpha

    app.jinja_env.globals['prefixed_url_for'] = prefixed_url_for
    app.jinja_env.globals['get_pagination_html'] = get_pagination_html
    app.jinja_env.globals['get_base_background_color'] = get_base_background_color
    app.jinja_env.globals['asset_url'] = asset_url
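A minimal usage sketch for the asset_url global registered above; the app setup and template string are illustrative:

from flask import Flask, render_template_string

app = Flask(__name__)
register_filters(app)

with app.test_request_context():
    # asset_url is available as a Jinja global after register_filters(app).
    html = render_template_string(
        "<script src=\"{{ asset_url('dist/chat-client.js') }}\"></script>"
    )
    # With STATIC_URL unset this resolves to '/static/dist/chat-client.<hash>.js'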
config/agents/evie_partner/PARTNER_RAG_AGENT/1.0.0.yaml (new file, 26 lines)
@@ -0,0 +1,26 @@
version: "1.0.0"
name: "Partner Rag Agent"
role: >
  You are a virtual assistant responsible for answering user questions about the Evie platform (Ask Eve AI) and products
  developed by partners on top of it. You are a reliable point of contact for end-users seeking help, clarification, or
  deeper understanding of features, capabilities, integrations, or workflows related to these AI-powered solutions.
goal: >
  Your primary goal is to:
  • Provide clear, relevant, and accurate responses to user questions.
  • Reduce friction in user onboarding and daily usage.
  • Increase user confidence and adoption of both the platform and partner-developed products.
  • Act as a bridge between documentation and practical application, enabling users to help themselves through intelligent guidance.
backstory: >
  You have access to Evie’s own documentation, partner product manuals, and real user interactions. You are designed
  to replace passive documentation with active, contextual assistance.
  You have evolved beyond a support bot: you combine knowledge, reasoning, and a friendly tone to act as a product
  companion that grows with the ecosystem. As partner products expand, the agent updates its knowledge and learns to
  distinguish between general platform capabilities and product-specific nuances, offering a personalised experience
  each time.
full_model_name: "mistral.mistral-medium-latest"
temperature: 0.3
metadata:
  author: "Josako"
  date_added: "2025-07-16"
  description: "An Agent that does RAG based on a user's question, RAG content & history"
  changes: "Initial version"
@@ -1,17 +0,0 @@
version: "1.0.0"
name: "Email Content Agent"
role: >
  Email Content Writer
goal: >
  Craft a highly personalized email that resonates with the {end_user_role}'s context and identification (personal and
  company if available).
  {custom_goal}
backstory: >
  You are an expert in writing compelling, personalized emails that capture the {end_user_role}'s attention and drive
  engagement. You are perfectly multilingual, and can write the mail in the native language of the {end_user_role}.
  {custom_backstory}
metadata:
  author: "Josako"
  date_added: "2025-01-08"
  description: "An Agent that writes engaging emails."
  changes: "Initial version"
@@ -1,16 +0,0 @@
version: "1.0.0"
name: "Email Engagement Agent"
role: >
  Engagement Optimization Specialist {custom_role}
goal: >
  You ensure that the email includes strong CTAs and strategically placed engagement hooks that encourage the
  {end_user_role} to take immediate action. {custom_goal}
backstory: >
  You specialize in optimizing content to ensure that it not only resonates with the recipient but also encourages them
  to take the desired action.
  {custom_backstory}
metadata:
  author: "Josako"
  date_added: "2025-01-08"
  description: "An Agent that ensures the email is engaging and leads to the maximal desired action"
  changes: "Initial version"
@@ -1,20 +0,0 @@
version: "1.0.0"
name: "Identification Agent"
role: >
  Identification Administrative Force. {custom_role}
goal: >
  You are an administrative force that gathers identification information to complete the administration of an
  end user and the company he or she works for, by monitoring conversations and advising on questions that help you do
  your job. You are responsible for completing the company's backend systems (like CRM, ERP, ...) with inputs from the
  end user in the conversation.
  {custom_goal}
backstory: >
  You are an administrative force for {company}, and very proficient in gathering information for the company's backend
  systems. You do so by monitoring conversations between one of your colleagues (e.g. sales, finance, support, ...) and
  an end user. You ask your colleagues to request additional information to complete your task.
  {custom_backstory}
metadata:
  author: "Josako"
  date_added: "2025-01-08"
  description: "An Agent that gathers administrative information"
  changes: "Initial version"
config/agents/globals/RAG_AGENT/1.1.0.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
version: "1.1.0"
name: "Rag Agent"
role: >
  {tenant_name} Spokesperson. {custom_role}
goal: >
  You get questions from a human correspondent, and give answers based on a given context, taking into account the history
  of the current conversation.
  {custom_goal}
backstory: >
  You are the primary contact for {tenant_name}. You are known as {name}, and can be addressed by this name, or 'you'. You are
  a very good communicator, and adapt to the style used by the human asking for information (e.g. formal or informal).
  You always stay correct and polite, whatever happens. And you ensure no discriminating language is used.
  You are perfectly multilingual in all known languages, and do your best to answer questions in {language}, whatever
  language the context provided to you is in. You are participating in a conversation, not writing e.g. an email. Do not
  include a salutation or closing greeting in your answer.
  {custom_backstory}
full_model_name: "mistral.mistral-medium-latest"
temperature: 0.4
metadata:
  author: "Josako"
  date_added: "2025-01-08"
  description: "An Agent that does RAG based on a user's question, RAG content & history"
  changes: "Initial version"
config/agents/globals/RAG_AGENT/1.2.0.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
version: "1.2.0"
name: "Rag Agent"
role: >
  {tenant_name}'s Spokesperson. {custom_role}
goal: >
  You get questions from a human correspondent, and give answers based on a given context, taking into account the history
  of the current conversation.
  {custom_goal}
backstory: >
  You are the primary contact for {tenant_name}, and have been its spokesperson for a very long time. You are used to
  addressing customers, prospects, press, ...
  You are known as {name}, and can be addressed by this name, or 'you'.
  You are a very good communicator who knows how to adapt your style to the audience you're interacting with.
  You always stay correct and polite, whatever happens. And you ensure no discriminating language is used.
  You are perfectly multilingual in all known languages, and do your best to answer questions in {language}, whatever
  language the context provided to you is in. You are participating in a conversation, not writing e.g. an email or
  essay. Do not include a salutation or closing greeting in your answer.
  {custom_backstory}
full_model_name: "mistral.mistral-medium-latest"
allowed_models:
  - "mistral.mistral-small-latest"
  - "mistral.mistral-medium-latest"
  - "mistral.magistral-medium-latest"
temperature: 0.3
metadata:
  author: "Josako"
  date_added: "2025-01-08"
  description: "An Agent that does RAG based on a user's question, RAG content & history"
  changes: "Initial version"
@@ -1,26 +0,0 @@
version: "1.0.0"
name: "Rag Communication Agent"
role: >
  {company} Interaction Responsible. {custom_role}
goal: >
  Your team has collected answers to a question asked. But it also created some additional questions to be asked. You
  ensure the necessary answers are returned, and make an informed selection of the additional questions that can be
  asked (combining them when appropriate), ensuring the human you're communicating with does not get overwhelmed.
  {custom_goal}
backstory: >
  You are the online communication expert for {company}. You have handled a lot of online communications with both customers
  and internal employees. You are a master at composing one coherent reply in a conversation that includes all the
  answers, and a selection of additional questions to be asked in a conversation. Although your backoffice team might
  want to ask a myriad of questions, you understand that doesn't fit with the way humans communicate. You know how to
  combine multiple related questions, and understand how to interweave the questions in the answers when related.
  You are perfectly multilingual in all known languages, and do your best to answer questions in {language}, whatever
  language the context provided to you is in. Also ensure that the questions asked do not contradict the answers
  given, and are not obsolete given the answers provided.
  You are participating in a conversation, not writing e.g. an email. Do not include a salutation or closing greeting
  in your answer.
  {custom_backstory}
metadata:
  author: "Josako"
  date_added: "2025-01-08"
  description: "An Agent that consolidates both answers and questions in a consistent reply"
  changes: "Initial version"
config/agents/globals/RAG_PROOFREADER_AGENT/1.0.0.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
version: "1.0.0"
name: "Rag Proofreader Agent"
role: >
  Proofreader for {tenant_name}. {custom_role}
goal: >
  You get a prepared answer to be sent out, and adapt it to comply with best practices.
  {custom_goal}
backstory: >
  You are the primary contact for {tenant_name}, and have been its spokesperson for a very long time. You are used to
  addressing customers, prospects, press, ...
  You are known as {name}, and can be addressed by this name, or 'you'.
  You review communications and ensure they are clear and follow best practices.
  {custom_backstory}
full_model_name: "mistral.mistral-medium-latest"
allowed_models:
  - "mistral.mistral-small-latest"
  - "mistral.mistral-medium-latest"
  - "mistral.magistral-medium-latest"
temperature: 0.4
metadata:
  author: "Josako"
  date_added: "2025-10-22"
  description: "An Agent that does QA Activities on provided answers"
  changes: "Initial version"
@@ -1,22 +0,0 @@
version: "1.0.0"
name: "SPIN Sales Assistant"
role: >
  Sales Assistant for {company} on {products}. {custom_role}
goal: >
  Your main job is to help your sales specialist to analyze an ongoing conversation with a customer, and detect
  SPIN-related information. {custom_goal}
backstory: >
  You are a sales assistant for {company} on {products}. You are known as {name}, and can be addressed by this name, or 'you'. You are
  trained to understand and analyse ongoing conversations. You are proficient in detecting SPIN-related information in a
  conversation.
  SPIN stands for:
  - Situation information - Understanding the customer's current context
  - Problem information - Uncovering challenges and pain points
  - Implication information - Exploring consequences of those problems
  - Need-payoff information - Helping customers realize the value of solutions
  {custom_backstory}
metadata:
  author: "Josako"
  date_added: "2025-01-08"
  description: "An Agent that detects SPIN information in an ongoing conversation"
  changes: "Initial version"
@@ -1,25 +0,0 @@
version: "1.0.0"
name: "SPIN Sales Specialist"
role: >
  Sales Specialist for {company} on {products}. {custom_role}
goal: >
  Your main job is to do sales using the SPIN selling methodology in a first conversation with a potential customer.
  {custom_goal}
backstory: >
  You are a sales specialist for {company} on {products}. You are known as {name}, and can be addressed by this name,
  or 'you'. You have an assistant that provides you with already detected SPIN information in an ongoing conversation. You
  decide on follow-up questions for more in-depth information to ensure we get the required information that may lead to
  selling {products}.
  SPIN stands for:
  - Situation information - Understanding the customer's current context
  - Problem information - Uncovering challenges and pain points
  - Implication information - Exploring consequences of those problems
  - Need-payoff information - Helping customers realize the value of solutions
  {custom_backstory}
  You are acquainted with the following product information:
  {product_information}
metadata:
  author: "Josako"
  date_added: "2025-01-08"
  description: "An Agent that asks for follow-up questions for the SPIN process"
  changes: "Initial version"
config/agents/traicie/TRAICIE_RECRUITER_AGENT/1.0.0.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
version: "1.0.0"
name: "Traicie Recruiter"
role: >
  You are an Expert Recruiter working for {tenant_name}
  {custom_role}
goal: >
  As an expert recruiter, you identify, attract, and secure top talent by building genuine relationships, deeply
  understanding business needs, and ensuring optimal alignment between candidate potential and organizational goals,
  while championing diversity, culture fit, and long-term retention.
  {custom_goal}
backstory: >
  You started your career in a high-pressure agency setting, where you quickly learned the art of fast-paced hiring and
  relationship building. Over the years, you moved in-house, partnering closely with business leaders to shape
  recruitment strategies that go beyond filling roles—you focus on finding the right people to drive growth and culture.
  With a strong grasp of both tech and non-tech profiles, you’ve adapted to changing trends, from remote work to
  AI-driven sourcing. You’re more than a recruiter—you’re a trusted advisor, a brand ambassador, and a connector of
  people and purpose.
  {custom_backstory}
full_model_name: "mistral.mistral-medium-latest"
temperature: 0.3
metadata:
  author: "Josako"
  date_added: "2025-06-18"
  description: "Traicie Recruiter Agent"
  changes: "Initial version"
config/agents/traicie/TRAICIE_RECRUITER_AGENT/1.0.1.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
version: "1.0.1"
name: "Traicie Recruiter"
role: >
  You are an Expert Recruiter working for {tenant_name}, known as {name}. You can be addressed as {name}
  {custom_role}
goal: >
  As an expert recruiter, you identify, attract, and secure top talent by building genuine relationships, deeply
  understanding business needs, and ensuring optimal alignment between candidate potential and organizational goals,
  while championing diversity, culture fit, and long-term retention.
  {custom_goal}
backstory: >
  You started your career in a high-pressure agency setting, where you quickly learned the art of fast-paced hiring and
  relationship building. Over the years, you moved in-house, partnering closely with business leaders to shape
  recruitment strategies that go beyond filling roles—you focus on finding the right people to drive growth and culture.
  With a strong grasp of both tech and non-tech profiles, you’ve adapted to changing trends, from remote work to
  AI-driven sourcing. You’re more than a recruiter—you’re a trusted advisor, a brand ambassador, and a connector of
  people and purpose.
  {custom_backstory}
full_model_name: "mistral.mistral-medium-latest"
temperature: 0.3
metadata:
  author: "Josako"
  date_added: "2025-07-03"
  description: "Traicie Recruiter Agent"
  changes: "Ensure recruiter can be addressed by a name"
@@ -0,0 +1,15 @@
version: "1.0.0"
name: "Traicie KO Criteria Questions"
file_type: "yaml"
dynamic: true
configuration:
  specialist_id:
    name: "Specialist ID"
    type: "int"
    description: "The Specialist this asset is created for"
    required: true
metadata:
  author: "Josako"
  date_added: "2025-07-01"
  description: "Asset that defines KO Criteria Questions and Answers"
  changes: "Initial version"
@@ -0,0 +1,19 @@
version: "1.0.0"
name: "Role Definition Catalog"
description: "A Catalog containing information specific to a given role"
configuration:
  tagging_fields:
    role_reference:
      type: "string"
      required: true
      description: "A unique identification for the role"
    document_type:
      type: "enum"
      required: true
      description: "Type of document"
      allowed_values: ["Intake", "Vacancy Text", "Additional Information"]
  document_version_configurations: ["tagging_fields"]
metadata:
  author: "Josako"
  date_added: "2025-07-07"
  description: "A Catalog containing information specific to a given role"
config/config.py (474 lines)
@@ -2,6 +2,9 @@ import os
 from os import environ, path
 from datetime import timedelta
 import redis
+import ssl
+import tempfile
+from ipaddress import ip_address

 from common.utils.prompt_loader import load_prompt_templates

@@ -12,25 +15,147 @@ class Config(object):
    DEBUG = False
    DEVELOPMENT = False
    SECRET_KEY = environ.get('SECRET_KEY')
    SESSION_COOKIE_SECURE = False
    SESSION_COOKIE_HTTPONLY = True
    COMPONENT_NAME = environ.get('COMPONENT_NAME')
    SESSION_KEY_PREFIX = f'{COMPONENT_NAME}_'

-   # Database Settings
+   # Database Settings ---------------------------------------------------------------------------
    DB_HOST = environ.get('DB_HOST')
    DB_USER = environ.get('DB_USER')
    DB_PASS = environ.get('DB_PASS')
    DB_NAME = environ.get('DB_NAME')
    DB_PORT = environ.get('DB_PORT')
-   SQLALCHEMY_DATABASE_URI = f'postgresql+pg8000://{DB_USER}:{DB_PASS}@{DB_HOST}:{DB_PORT}/{DB_NAME}'
+   SQLALCHEMY_DATABASE_URI = f'postgresql+psycopg://{DB_USER}:{DB_PASS}@{DB_HOST}:{DB_PORT}/{DB_NAME}'
    SQLALCHEMY_BINDS = {'public': SQLALCHEMY_DATABASE_URI}

    # Database Engine Options (health checks and keepalives)
    PGSQL_CERT_DATA = environ.get('PGSQL_CERT')
    PGSQL_CA_CERT_PATH = None
    if PGSQL_CERT_DATA:
        _tmp = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.pem')
        _tmp.write(PGSQL_CERT_DATA)
        _tmp.flush()
        _tmp.close()
        PGSQL_CA_CERT_PATH = _tmp.name

    # Psycopg3 connect args (libpq parameters)
    _CONNECT_ARGS = {
        'connect_timeout': 5,
        'keepalives': 1,
        'keepalives_idle': 60,
        'keepalives_interval': 30,
        'keepalives_count': 5,
    }
    if PGSQL_CA_CERT_PATH:
        _CONNECT_ARGS.update({
            'sslmode': 'require',
            'sslrootcert': PGSQL_CA_CERT_PATH,
        })

    SQLALCHEMY_ENGINE_OPTIONS = {
        'pool_pre_ping': True,
        'pool_recycle': 180,
        'pool_use_lifo': True,
        'connect_args': _CONNECT_ARGS,
    }

    # Redis Settings ------------------------------------------------------------------------------
    REDIS_URL = environ.get('REDIS_URL')
    REDIS_PORT = environ.get('REDIS_PORT', '6379')
    REDIS_USER = environ.get('REDIS_USER')
    REDIS_PASS = environ.get('REDIS_PASS')
    REDIS_CERT_DATA = environ.get('REDIS_CERT')
    REDIS_SCHEME = None

    # Determine if REDIS_URL is an IP; use it to control hostname checking
    REDIS_IS_IP = False
    try:
        ip_address(REDIS_URL)
        REDIS_IS_IP = True
    except Exception:
        REDIS_IS_IP = False
    REDIS_SSL_CHECK_HOSTNAME = not REDIS_IS_IP

    # Write the CA once to a file, expose its path
    REDIS_CA_CERT_PATH = None
    if REDIS_CERT_DATA:
        _tmp = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.pem')
        _tmp.write(REDIS_CERT_DATA)
        _tmp.flush()
        _tmp.close()
        REDIS_CA_CERT_PATH = _tmp.name

    if not REDIS_CERT_DATA:  # We are in a simple dev/test environment
        REDIS_SCHEME = 'redis'
        REDIS_BASE_URI = f'redis://{REDIS_URL}:{REDIS_PORT}'
    else:  # We are in a Scaleway environment, providing name, user and certificate
        REDIS_SCHEME = 'rediss'
        REDIS_BASE_URI = f'rediss://{REDIS_USER}:{REDIS_PASS}@{REDIS_URL}:{REDIS_PORT}'

    # Central SSL options dict for reuse (Celery/Dogpile/etc.)
    REDIS_SSL_OPTIONS = None
    if REDIS_CERT_DATA and REDIS_CA_CERT_PATH:
        REDIS_SSL_OPTIONS = {
            'ssl_cert_reqs': ssl.CERT_REQUIRED,
            'ssl_ca_certs': REDIS_CA_CERT_PATH,
            'ssl_check_hostname': REDIS_SSL_CHECK_HOSTNAME,
        }

    # PubSub/EPT specific configuration (dedicated pool)
    REDIS_SPECIALIST_EXEC_DB = environ.get('REDIS_SPECIALIST_EXEC_DB', '0')
    REDIS_PUBSUB_MAX_CONNECTIONS = int(environ.get('REDIS_PUBSUB_MAX_CONNECTIONS', '200'))
    REDIS_PUBSUB_SOCKET_TIMEOUT = float(environ.get('REDIS_PUBSUB_SOCKET_TIMEOUT', '10'))
    REDIS_PUBSUB_CONNECT_TIMEOUT = float(environ.get('REDIS_PUBSUB_CONNECT_TIMEOUT', '3'))

    REDIS_PREFIXES = {
        'celery_app': 'celery:app:',
        'celery_chat': 'celery:chat:',
        'session': 'session:',
        'cache_workers': 'cache:workers:',
        'pubsub_execution': 'pubsub:execution:',
        'startup_ops': 'startup:ops:',
    }

    # Celery Redis settings
    CELERY_BROKER_URL = f'{REDIS_BASE_URI}/0'
    CELERY_RESULT_BACKEND = f'{REDIS_BASE_URI}/0'
    CELERY_BROKER_URL_CHAT = f'{REDIS_BASE_URI}/0'
    CELERY_RESULT_BACKEND_CHAT = f'{REDIS_BASE_URI}/0'

    # SSE PubSub settings
    SPECIALIST_EXEC_PUBSUB = f"{REDIS_BASE_URI}/{REDIS_SPECIALIST_EXEC_DB}"

    # eveai_model cache Redis setting
    MODEL_CACHE_URL = f'{REDIS_BASE_URI}/0'

    # Session Settings with Redis -----------------------------------------------------------------
    SESSION_TYPE = 'redis'
    SESSION_PERMANENT = True
    SESSION_USE_SIGNER = True
    PERMANENT_SESSION_LIFETIME = timedelta(minutes=60)
    SESSION_REFRESH_EACH_REQUEST = True
    # Configure SESSION_REDIS with SSL when a cert is provided
    if REDIS_CERT_DATA and REDIS_CA_CERT_PATH:
        SESSION_REDIS = redis.from_url(
            f'{REDIS_BASE_URI}/0',  # REDIS_BASE_URI is already rediss://user:pass@host:port
            ssl_cert_reqs=ssl.CERT_REQUIRED,
            ssl_ca_certs=REDIS_CA_CERT_PATH,
            ssl_check_hostname=REDIS_SSL_CHECK_HOSTNAME,
        )
    else:
        SESSION_REDIS = redis.from_url(f'{REDIS_BASE_URI}/0')
    SESSION_KEY_PREFIX = f'session_{COMPONENT_NAME}:'
    SESSION_COOKIE_NAME = f'{COMPONENT_NAME}_session'
    SESSION_COOKIE_DOMAIN = None  # Let Flask determine this automatically
    SESSION_COOKIE_PATH = '/'
    SESSION_COOKIE_HTTPONLY = True
    SESSION_COOKIE_SECURE = False  # True for production with HTTPS
    SESSION_COOKIE_SAMESITE = 'Lax'
    REMEMBER_COOKIE_SAMESITE = 'strict'

    WTF_CSRF_ENABLED = True
    WTF_CSRF_TIME_LIMIT = None
    WTF_CSRF_SSL_STRICT = False  # Set to True if using HTTPS

-   # flask-security-too settings
+   # flask-security-too settings -----------------------------------------------------------------
    # SECURITY_URL_PREFIX = '/admin'
    SECURITY_LOGIN_URL = '/admin/login'
    SECURITY_LOGOUT_URL = '/admin/logout'
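A sketch of reusing the central REDIS_SSL_OPTIONS dict elsewhere, e.g. when building a Celery app; the broker_use_ssl wiring below is an assumption (not shown in this diff), though Celery's Redis transport does accept these ssl_* keys:

from celery import Celery

def make_celery(config):
    # Hypothetical factory; config is one of the Config classes in this module.
    celery = Celery('eveai', broker=config.CELERY_BROKER_URL)
    if config.REDIS_SSL_OPTIONS:
        celery.conf.broker_use_ssl = config.REDIS_SSL_OPTIONS
        celery.conf.redis_backend_use_ssl = config.REDIS_SSL_OPTIONS
    return celery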
@@ -44,18 +169,20 @@
    # SECURITY_POST_CHANGE_VIEW = '/admin/login'
    # SECURITY_BLUEPRINT_NAME = 'security_bp'
    SECURITY_PASSWORD_SALT = environ.get('SECURITY_PASSWORD_SALT')
    REMEMBER_COOKIE_SAMESITE = 'strict'
    SESSION_COOKIE_SAMESITE = 'Lax'
    SECURITY_CONFIRMABLE = True
    SECURITY_TRACKABLE = True
    SECURITY_PASSWORD_COMPLEXITY_CHECKER = 'zxcvbn'
-   SECURITY_POST_LOGIN_VIEW = '/user/tenant_overview'
+   SECURITY_POST_LOGIN_VIEW = '/admin/user/tenant_overview'
    SECURITY_RECOVERABLE = True
    SECURITY_EMAIL_SENDER = "eveai_super@flow-it.net"
    SECURITY_EMAIL_SUBJECT_PASSWORD_RESET = 'Reset Your Password'
    SECURITY_EMAIL_SUBJECT_PASSWORD_NOTICE = 'Your Password Has Been Reset'
    SECURITY_EMAIL_PLAINTEXT = False
    SECURITY_EMAIL_HTML = True
    SECURITY_SESSION_PROTECTION = 'basic'  # or 'basic' if 'strong' causes problems
    SECURITY_REMEMBER_TOKEN_VALIDITY = timedelta(minutes=60)  # Same as the session lifetime
    SECURITY_AUTO_LOGIN_AFTER_CONFIRM = True
    SECURITY_AUTO_LOGIN_AFTER_RESET = True

    # Ensure Flask-Security-Too is handling CSRF tokens when behind a proxy
    SECURITY_CSRF_PROTECT_MECHANISMS = ['session']
@@ -63,22 +190,103 @@
    SECURITY_CSRF_HEADER = 'X-XSRF-TOKEN'
    WTF_CSRF_CHECK_DEFAULT = False

-   # file upload settings
+   # file upload settings ------------------------------------------------------------------------
    MAX_CONTENT_LENGTH = 50 * 1024 * 1024

-   # supported languages
-   SUPPORTED_LANGUAGES = ['en', 'fr', 'nl', 'de', 'es']
+   # supported languages -------------------------------------------------------------------------
    SUPPORTED_LANGUAGE_DETAILS = {
        "English": {
            "iso 639-1": "en",
            "iso 639-2": "eng",
            "iso 639-3": "eng",
            "flag": "🇬🇧"
        },
        "French": {
            "iso 639-1": "fr",
            "iso 639-2": "fre",  # or 'fra'
            "iso 639-3": "fra",
            "flag": "🇫🇷"
        },
        "German": {
            "iso 639-1": "de",
            "iso 639-2": "ger",  # or 'deu'
            "iso 639-3": "deu",
            "flag": "🇩🇪"
        },
        "Spanish": {
            "iso 639-1": "es",
            "iso 639-2": "spa",
            "iso 639-3": "spa",
            "flag": "🇪🇸"
        },
        "Italian": {
            "iso 639-1": "it",
            "iso 639-2": "ita",
            "iso 639-3": "ita",
            "flag": "🇮🇹"
        },
        "Portuguese": {
            "iso 639-1": "pt",
            "iso 639-2": "por",
            "iso 639-3": "por",
            "flag": "🇵🇹"
        },
        "Dutch": {
            "iso 639-1": "nl",
            "iso 639-2": "dut",  # or 'nld'
            "iso 639-3": "nld",
            "flag": "🇳🇱"
        },
        "Russian": {
            "iso 639-1": "ru",
            "iso 639-2": "rus",
            "iso 639-3": "rus",
            "flag": "🇷🇺"
        },
        "Chinese": {
            "iso 639-1": "zh",
            "iso 639-2": "chi",  # or 'zho'
            "iso 639-3": "zho",
            "flag": "🇨🇳"
        },
        "Japanese": {
            "iso 639-1": "ja",
            "iso 639-2": "jpn",
            "iso 639-3": "jpn",
            "flag": "🇯🇵"
        },
        "Korean": {
            "iso 639-1": "ko",
            "iso 639-2": "kor",
            "iso 639-3": "kor",
            "flag": "🇰🇷"
        },
        "Arabic": {
            "iso 639-1": "ar",
            "iso 639-2": "ara",
            "iso 639-3": "ara",
            "flag": "🇸🇦"
        },
        "Hindi": {
            "iso 639-1": "hi",
            "iso 639-2": "hin",
            "iso 639-3": "hin",
            "flag": "🇮🇳"
        },
    }

    # Derived language constants
    SUPPORTED_LANGUAGES = [lang_details["iso 639-1"] for lang_details in SUPPORTED_LANGUAGE_DETAILS.values()]
    SUPPORTED_LANGUAGES_FULL = list(SUPPORTED_LANGUAGE_DETAILS.keys())
    SUPPORTED_LANGUAGE_ISO639_1_LOOKUP = {lang_details["iso 639-1"]: lang_name for lang_name, lang_details in SUPPORTED_LANGUAGE_DETAILS.items()}

    # supported currencies ------------------------------------------------------------------------
    SUPPORTED_CURRENCIES = ['€', '$']

-   # supported LLMs
+   # supported LLMs & settings -------------------------------------------------------------------
    # SUPPORTED_EMBEDDINGS = ['openai.text-embedding-3-small', 'openai.text-embedding-3-large', 'mistral.mistral-embed']
    SUPPORTED_EMBEDDINGS = ['mistral.mistral-embed']
-   SUPPORTED_LLMS = ['openai.gpt-4o', 'openai.gpt-4o-mini',
-                     'mistral.mistral-large-latest', 'mistral.mistral-medium-latest', 'mistral.mistral-small-latest']
-
-   ANTHROPIC_LLM_VERSIONS = {'claude-3-5-sonnet': 'claude-3-5-sonnet-20240620', }
+   SUPPORTED_LLMS = ['mistral.mistral-large-latest', 'mistral.mistral-medium-latest', 'mistral.mistral-small-latest']

    # Annotation text chunk length
    ANNOTATION_TEXT_CHUNK_LENGTH = 10000
@@ -86,60 +294,33 @@
    # Environment Loaders
    OPENAI_API_KEY = environ.get('OPENAI_API_KEY')
    MISTRAL_API_KEY = environ.get('MISTRAL_API_KEY')
    GROQ_API_KEY = environ.get('GROQ_API_KEY')
    ANTHROPIC_API_KEY = environ.get('ANTHROPIC_API_KEY')

-   # Celery settings
+   # Celery settings (see above for Redis settings) ----------------------------------------------
    CELERY_TASK_SERIALIZER = 'json'
    CELERY_RESULT_SERIALIZER = 'json'
    CELERY_ACCEPT_CONTENT = ['json']
    CELERY_TIMEZONE = 'UTC'
    CELERY_ENABLE_UTC = True

-   # SocketIO settings
-   # SOCKETIO_ASYNC_MODE = 'threading'
-   # SOCKETIO_ASYNC_MODE = 'gevent'
-
-   # Session Settings
-   SESSION_TYPE = 'redis'
-   SESSION_PERMANENT = True
-   SESSION_USE_SIGNER = True
-   PERMANENT_SESSION_LIFETIME = timedelta(minutes=60)
-   SESSION_REFRESH_EACH_REQUEST = True
-
-   # JWT settings
+   # JWT settings --------------------------------------------------------------------------------
    JWT_SECRET_KEY = environ.get('JWT_SECRET_KEY')
    JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=1)  # Set token expiry to 1 hour
    JWT_ACCESS_TOKEN_EXPIRES_DEPLOY = timedelta(hours=24)  # Set long-lived token for deployment

-   # API Encryption
+   # API Encryption ------------------------------------------------------------------------------
    API_ENCRYPTION_KEY = environ.get('API_ENCRYPTION_KEY')

    # Fallback Algorithms
    FALLBACK_ALGORITHMS = [
        "RAG_TENANT",
        "RAG_WIKIPEDIA",
        "RAG_GOOGLE",
        "LLM"
    ]

    # Interaction algorithms
    INTERACTION_ALGORITHMS = {
        "RAG_TENANT": {"name": "RAG_TENANT", "description": "Algorithm using only information provided by the tenant"},
        "RAG_WIKIPEDIA": {"name": "RAG_WIKIPEDIA", "description": "Algorithm using information provided by Wikipedia"},
        "RAG_GOOGLE": {"name": "RAG_GOOGLE", "description": "Algorithm using information provided by Google"},
        "LLM": {"name": "LLM", "description": "Algorithm using information integrated in the used LLM"}
    }

-   # Email settings for API key notifications
+   # Email settings for API key notifications ----------------------------------------------------
    PROMOTIONAL_IMAGE_URL = 'https://askeveai.com/wp-content/uploads/2024/07/Evie-Call-scaled.jpg'  # Replace with your actual URL

    # Langsmith settings
    LANGCHAIN_TRACING_V2 = True
    LANGCHAIN_ENDPOINT = 'https://api.smith.langchain.com'
    LANGCHAIN_PROJECT = "eveai"

    # Type Definitions ----------------------------------------------------------------------------
    TENANT_TYPES = ['Active', 'Demo', 'Inactive', 'Test']
    CONSENT_TYPES = ["Data Privacy Agreement", "Terms & Conditions"]
    # CONSENT_TYPE_MAP maps names to the actual base folders the consent documents are stored in
    CONSENT_TYPE_MAP = {
        "Data Privacy Agreement": "dpa",
        "Terms & Conditions": "terms",
    }

    # The maximum number of seconds allowed for audio compression (to save resources)
    MAX_COMPRESSION_DURATION = 60 * 10  # 10 minutes
@@ -172,6 +353,32 @@
    # Entitlement Constants
    ENTITLEMENTS_MAX_PENDING_DAYS = 5  # Defines the maximum number of days a pending entitlement can be active

    # Content Directory for static content like the changelog, terms & conditions, DPA statement, ...
    CONTENT_DIR = '/app/content'

    # Ensure health check endpoints are exempt from CSRF protection
    SECURITY_EXEMPT_URLS = [
        r'^/healthz($|/.*)',
        r'^/_healthz($|/.*)',
    ]
    SECURITY_LOGIN_WITHOUT_VIEWS = True  # This prevents automatic redirects

    # Define the nginx prefix used for the specific apps
    CHAT_CLIENT_PREFIX = 'chat-client/chat/'
    EVEAI_APP_PREFIX = 'admin/'
    # Whether to use a dynamic fallback (X-Forwarded-Prefix/Referer) when EVEAI_APP_PREFIX is empty
    EVEAI_USE_DYNAMIC_PREFIX_FALLBACK = False

    # Consent guard configuration (config-driven whitelist)
    # List of endpoint names to exempt from the global consent guard
    # Example: ['security_bp.login', 'security_bp.logout', 'user_bp.tenant_consent']
    CONSENT_GUARD_EXEMPT_ENDPOINTS = []
    # List of endpoint name prefixes; any endpoint starting with one of these is exempt
    # Example: ['security_bp.', 'healthz_bp.']
    CONSENT_GUARD_EXEMPT_PREFIXES = []
    # TTL for consent status stored in session (seconds)
    CONSENT_SESSION_TTL_SECONDS = int(environ.get('CONSENT_SESSION_TTL_SECONDS', '45'))
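A minimal sketch of extending the consent-guard whitelist from a deployment-specific config; the class name and endpoint names are illustrative, and is_exempt_endpoint() merges these with its built-in defaults:

class CustomerProdConfig(ProdConfig):
    # Hypothetical endpoints; these are unioned with the hard-coded defaults.
    CONSENT_GUARD_EXEMPT_ENDPOINTS = ['basic_bp.landing_page']
    CONSENT_GUARD_EXEMPT_PREFIXES = ['metrics_bp.']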
class DevConfig(Config):
    DEVELOPMENT = True
@@ -179,58 +386,16 @@ class DevConfig(Config):
    FLASK_DEBUG = True
    EXPLAIN_TEMPLATE_LOADING = False

    # Define the nginx prefix used for the specific apps
    EVEAI_APP_LOCATION_PREFIX = '/admin'
    EVEAI_CHAT_LOCATION_PREFIX = '/chat'

    # file upload settings
    # UPLOAD_FOLDER = '/app/tenant_files'

    # Redis Settings
    REDIS_URL = 'redis'
    REDIS_PORT = '6379'
    REDIS_BASE_URI = f'redis://{REDIS_URL}:{REDIS_PORT}'

    # Celery settings
    # eveai_app Redis Settings
    CELERY_BROKER_URL = f'{REDIS_BASE_URI}/0'
    CELERY_RESULT_BACKEND = f'{REDIS_BASE_URI}/0'
    # eveai_chat Redis Settings
    CELERY_BROKER_URL_CHAT = f'{REDIS_BASE_URI}/3'
    CELERY_RESULT_BACKEND_CHAT = f'{REDIS_BASE_URI}/3'
    # eveai_chat_workers cache Redis Settings
    CHAT_WORKER_CACHE_URL = f'{REDIS_BASE_URI}/4'
    # specialist execution pub/sub Redis Settings
    SPECIALIST_EXEC_PUBSUB = f'{REDIS_BASE_URI}/5'

    # Unstructured settings
    # UNSTRUCTURED_API_KEY = 'pDgCrXumYhM3CNvjvwV8msMldXC3uw'
    # UNSTRUCTURED_BASE_URL = 'https://flowitbv-16c4us0m.api.unstructuredapp.io'
    # UNSTRUCTURED_FULL_URL = 'https://flowitbv-16c4us0m.api.unstructuredapp.io/general/v0/general'

    # SocketIO settings
    # SOCKETIO_MESSAGE_QUEUE = f'{REDIS_BASE_URI}/1'
    # SOCKETIO_CORS_ALLOWED_ORIGINS = '*'
    # SOCKETIO_LOGGER = True
    # SOCKETIO_ENGINEIO_LOGGER = True
    # SOCKETIO_PING_TIMEOUT = 20000
    # SOCKETIO_PING_INTERVAL = 25000
    # SOCKETIO_MAX_IDLE_TIME = timedelta(minutes=60)  # Changing this value ==> change maxConnectionDuration value in
    # eveai-chat-widget.js

    # Google Cloud settings
    GC_PROJECT_NAME = 'eveai-420711'
    GC_LOCATION = 'europe-west1'
    GC_KEY_RING = 'eveai-chat'
    GC_CRYPTO_KEY = 'envelope-encryption-key'

    # Session settings
    SESSION_REDIS = redis.from_url(f'{REDIS_BASE_URI}/2')
    # Define the static path
    STATIC_URL = None

    # PATH settings
    ffmpeg_path = '/usr/bin/ffmpeg'

    # OBJECT STORAGE
    OBJECT_STORAGE_TYPE = 'MINIO'
    OBJECT_STORAGE_TENANT_BASE = 'folder'
    OBJECT_STORAGE_BUCKET_NAME = 'eveai-tenants'
    # MINIO
    MINIO_ENDPOINT = 'minio:9000'
    MINIO_ACCESS_KEY = 'minioadmin'
|
||||
MINIO_USE_HTTPS = False
|
||||
|
||||
|
||||
class TestConfig(Config):
|
||||
DEVELOPMENT = True
|
||||
DEBUG = True
|
||||
FLASK_DEBUG = True
|
||||
EXPLAIN_TEMPLATE_LOADING = False
|
||||
|
||||
# Define the static path
|
||||
STATIC_URL = None
|
||||
|
||||
# PATH settings
|
||||
ffmpeg_path = '/usr/bin/ffmpeg'
|
||||
|
||||
# OBJECT STORAGE
|
||||
OBJECT_STORAGE_TYPE = 'MINIO'
|
||||
OBJECT_STORAGE_TENANT_BASE = 'folder'
|
||||
OBJECT_STORAGE_BUCKET_NAME = 'eveai-tenants'
|
||||
# MINIO
|
||||
MINIO_ENDPOINT = 'minio:9000'
|
||||
MINIO_ACCESS_KEY = 'minioadmin'
|
||||
MINIO_SECRET_KEY = 'minioadmin'
|
||||
MINIO_USE_HTTPS = False
|
||||
|
||||
|
||||
class StagingConfig(Config):
|
||||
DEVELOPMENT = False
|
||||
DEBUG = True
|
||||
FLASK_DEBUG = True
|
||||
EXPLAIN_TEMPLATE_LOADING = False
|
||||
|
||||
# Define the static path
|
||||
STATIC_URL = 'https://evie-staging-static.askeveai.com/'
|
||||
|
||||
# PATH settings
|
||||
ffmpeg_path = '/usr/bin/ffmpeg'
|
||||
|
||||
# OBJECT STORAGE
|
||||
OBJECT_STORAGE_TYPE = 'SCALEWAY'
|
||||
OBJECT_STORAGE_TENANT_BASE = 'folder'
|
||||
OBJECT_STORAGE_BUCKET_NAME = 'eveai-staging'
|
||||
# MINIO
|
||||
MINIO_ENDPOINT = environ.get('MINIO_ENDPOINT')
|
||||
MINIO_ACCESS_KEY = environ.get('MINIO_ACCESS_KEY')
|
||||
MINIO_SECRET_KEY = environ.get('MINIO_SECRET_KEY')
|
||||
MINIO_USE_HTTPS = True
|
||||
|
||||
# Push gateway grouping elements
|
||||
pod_name = os.getenv('POD_NAME')
|
||||
pod_namespace = os.getenv('POD_NAMESPACE')
|
||||
|
||||
|
||||
class ProdConfig(Config):
    DEVELOPMENT = False
    DEBUG = False
@@ -250,53 +465,10 @@ class ProdConfig(Config):
    WTF_CSRF_SSL_STRICT = True  # Set to True if using HTTPS

    # Define the nginx prefix used for the specific apps
    EVEAI_APP_LOCATION_PREFIX = '/admin'
-   EVEAI_CHAT_LOCATION_PREFIX = '/chat'
+   EVEAI_CHAT_LOCATION_PREFIX = 'EVEAI_APP_LOCATION_PREFIX'

    # flask-mailman settings
    MAIL_USERNAME = 'eveai_super@flow-it.net'
    MAIL_PASSWORD = '$6xsWGbNtx$CFMQZqc*'

    # file upload settings
    # UPLOAD_FOLDER = '/app/tenant_files'

    # Redis Settings
    REDIS_USER = environ.get('REDIS_USER')
    REDIS_PASS = environ.get('REDIS_PASS')
    REDIS_URL = environ.get('REDIS_URL')
    REDIS_PORT = environ.get('REDIS_PORT', '6379')
    REDIS_BASE_URI = f'redis://{REDIS_USER}:{REDIS_PASS}@{REDIS_URL}:{REDIS_PORT}'

    # Celery settings
    # eveai_app Redis Settings
    CELERY_BROKER_URL = f'{REDIS_BASE_URI}/0'
    CELERY_RESULT_BACKEND = f'{REDIS_BASE_URI}/0'
    # eveai_chat Redis Settings
    CELERY_BROKER_URL_CHAT = f'{REDIS_BASE_URI}/3'
    CELERY_RESULT_BACKEND_CHAT = f'{REDIS_BASE_URI}/3'
    # eveai_chat_workers cache Redis Settings
    CHAT_WORKER_CACHE_URL = f'{REDIS_BASE_URI}/4'
    # specialist execution pub/sub Redis Settings
    SPECIALIST_EXEC_PUBSUB = f'{REDIS_BASE_URI}/5'

    # Session settings
    SESSION_REDIS = redis.from_url(f'{REDIS_BASE_URI}/2')

    # SocketIO settings
    # SOCKETIO_MESSAGE_QUEUE = f'{REDIS_BASE_URI}/1'
    # SOCKETIO_CORS_ALLOWED_ORIGINS = '*'
    # SOCKETIO_LOGGER = True
    # SOCKETIO_ENGINEIO_LOGGER = True
    # SOCKETIO_PING_TIMEOUT = 20000
    # SOCKETIO_PING_INTERVAL = 25000
    # SOCKETIO_MAX_IDLE_TIME = timedelta(minutes=60)  # Changing this value ==> change maxConnectionDuration value in
    # eveai-chat-widget.js

    # Google Cloud settings
    GC_PROJECT_NAME = 'eveai-420711'
    GC_LOCATION = 'europe-west1'
    GC_KEY_RING = 'eveai-chat'
    GC_CRYPTO_KEY = 'envelope-encryption-key'
    # Define the static path
    STATIC_URL = 'https://evie-prod-static.askeveai.com'

    # PATH settings
    ffmpeg_path = '/usr/bin/ffmpeg'
@@ -311,6 +483,8 @@
def get_config(config_name='dev'):
    configs = {
        'dev': DevConfig,
+       'test': TestConfig,
+       'staging': StagingConfig,
        'prod': ProdConfig,
        'default': DevConfig,
    }
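A minimal usage sketch for get_config in an app factory; the FLASK_CONFIG variable name and factory shape are assumptions:

import os
from flask import Flask

def create_app():
    app = Flask(__name__)
    # config_name selects one of the Config classes registered above.
    app.config.from_object(get_config(os.environ.get('FLASK_CONFIG', 'dev')))
    return app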
@@ -0,0 +1,94 @@
version: "1.0.0"
name: "Chat Client Customisation"
configuration:
  sidebar_markdown:
    name: "Sidebar Markdown"
    description: "Sidebar Markdown-formatted Text"
    type: "text"
    required: false
  sidebar_color:
    name: "Sidebar Text Color"
    description: "Sidebar Color"
    type: "color"
    required: false
  sidebar_background:
    name: "Sidebar Background Color"
    description: "Sidebar Background Color"
    type: "color"
    required: false
  markdown_background_color:
    name: "Markdown Background Color"
    description: "Markdown Background Color"
    type: "color"
    required: false
  markdown_text_color:
    name: "Markdown Text Color"
    description: "Markdown Text Color"
    type: "color"
    required: false
  gradient_start_color:
    name: "Chat Gradient Background Start Color"
    description: "Start Color for the gradient in the Chat Area"
    type: "color"
    required: false
  gradient_end_color:
    name: "Chat Gradient Background End Color"
    description: "End Color for the gradient in the Chat Area"
    type: "color"
    required: false
  progress_tracker_insights:
    name: "Progress Tracker Insights Level"
    description: "Level of information shown by the Progress Tracker"
    type: "enum"
    allowed_values: ["No Information", "Active Interaction Only", "All Interactions"]
    default: "No Information"
    required: true
  form_title_display:
    name: "Form Title Display"
    description: "Level of information shown for the Form Title"
    type: "enum"
    allowed_values: ["No Title", "Full Title"]
    default: "Full Title"
    required: true
  active_background_color:
    name: "Active Interaction Background Color"
    description: "Primary Color"
    type: "color"
    required: false
  history_background:
    name: "History Background"
    description: "Percentage to lighten (+) / darken (-) the user message background"
    type: "integer"
    min_value: -50
    max_value: 50
    required: false
  ai_message_background:
    name: "AI (Bot) Message Background Color"
    description: "AI (Bot) Message Background Color"
    type: "color"
    required: false
  ai_message_text_color:
    name: "AI (Bot) Message Text Color"
    description: "AI (Bot) Message Text Color"
    type: "color"
    required: false
  human_message_background:
    name: "Human Message Background Color"
    description: "Human Message Background Color"
    type: "color"
    required: false
  human_message_text_color:
    name: "Human Message Text Color"
    description: "Human Message Text Color"
    type: "color"
    required: false
  human_message_inactive_text_color:
    name: "Human Message Inactive Text Color"
    description: "Human Message Inactive Text Color"
    type: "color"
    required: false
metadata:
  author: "Josako"
  date_added: "2024-06-06"
  changes: "Adaptations to make color choosing more consistent and user friendly"
  description: "Parameters allowing to customise the chat client"
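The schema above is purely declarative (type, allowed_values, min_value/max_value, default, required). A minimal sketch of how such a definition could be validated; the function and messages are illustrative, the project's real loader is not shown in this diff:

# Illustrative validator for the declarative schema above; an
# assumption, not the repository's actual configuration loader.
def validate_setting(spec: dict, value):
    if spec.get('type') == 'enum' and value not in spec.get('allowed_values', []):
        raise ValueError(f"{spec['name']}: {value!r} not in {spec['allowed_values']}")
    if spec.get('type') == 'integer':
        value = int(value)
        # Missing bounds default to the value itself, i.e. no constraint.
        if not spec.get('min_value', value) <= value <= spec.get('max_value', value):
            raise ValueError(f"{spec['name']}: {value} out of range")
    return value

# Example: history_background accepts -50..50
validate_setting({'name': 'History Background', 'type': 'integer',
                  'min_value': -50, 'max_value': 50}, 25)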
8
config/data_capsules/traicie/TRAICIE_RQC/1.0.0.yaml
Normal file
@@ -0,0 +1,8 @@
version: "1.0.0"
name: "RQC"
description: "Recruitment Qualified Candidate"
configuration: {}
metadata:
  author: "Josako"
  date_added: "2025-07-24"
  description: "Capsule storing RQC information"
@@ -1,15 +1,13 @@
import json
import os
import sys
from datetime import datetime as dt, timezone as tz

from flask import current_app
from graypy import GELFUDPHandler
import logging
import logging.config

# Graylog configuration
GRAYLOG_HOST = os.environ.get('GRAYLOG_HOST', 'localhost')
GRAYLOG_PORT = int(os.environ.get('GRAYLOG_PORT', 12201))
env = os.environ.get('FLASK_ENV', 'development')
@@ -144,23 +142,6 @@ class TuningFormatter(logging.Formatter):
        return formatted_msg


class GraylogFormatter(logging.Formatter):
    """Maintains existing Graylog formatting while adding tuning fields"""

    def format(self, record):
        if getattr(record, 'is_tuning_log', False):
            # Add tuning-specific fields to Graylog
            record.tuning_fields = {
                'is_tuning_log': True,
                'tuning_type': record.tuning_type,
                'tenant_id': record.tenant_id,
                'catalog_id': record.catalog_id,
                'specialist_id': record.specialist_id,
                'retriever_id': record.retriever_id,
                'processor_id': record.processor_id,
                'session_id': record.session_id,
            }
        return super().format(record)


class TuningLogger:
    """Helper class to manage tuning logs with consistent structure"""
@@ -177,10 +158,10 @@ class TuningLogger:
            specialist_id: Optional specialist ID for context
            retriever_id: Optional retriever ID for context
            processor_id: Optional processor ID for context
            session_id: Optional session ID for context and log file naming
            log_file: Optional custom log file name to use
            session_id: Optional session ID for context
            log_file: Optional custom log file name (ignored - all logs go to tuning.log)
        """

        # Always use the standard tuning logger
        self.logger = logging.getLogger(logger_name)
        self.tenant_id = tenant_id
        self.catalog_id = catalog_id
@@ -188,63 +169,8 @@ class TuningLogger:
        self.retriever_id = retriever_id
        self.processor_id = processor_id
        self.session_id = session_id
        self.log_file = log_file
        # Determine whether to use a session-specific logger
        if session_id:
            # Create a unique logger name for this session
            session_logger_name = f"{logger_name}_{session_id}"
            self.logger = logging.getLogger(session_logger_name)

            # If this logger doesn't have handlers yet, configure it
            if not self.logger.handlers:
                # Determine log file path
                if not log_file and session_id:
                    log_file = f"logs/tuning_{session_id}.log"
                elif not log_file:
                    log_file = "logs/tuning.log"

                # Configure the logger
                self._configure_session_logger(log_file)
        else:
            # Use the standard tuning logger
            self.logger = logging.getLogger(logger_name)

    def _configure_session_logger(self, log_file):
        """Configure a new session-specific logger with appropriate handlers"""
        # Create and configure a file handler
        file_handler = logging.handlers.RotatingFileHandler(
            filename=log_file,
            maxBytes=1024 * 1024 * 3,  # 3MB
            backupCount=3
        )
        file_handler.setFormatter(TuningFormatter())
        file_handler.setLevel(logging.DEBUG)

        # Add the file handler to the logger
        self.logger.addHandler(file_handler)

        # Add Graylog handler in production
        env = os.environ.get('FLASK_ENV', 'development')
        if env == 'production':
            try:
                graylog_handler = GELFUDPHandler(
                    host=GRAYLOG_HOST,
                    port=GRAYLOG_PORT,
                    debugging_fields=True
                )
                graylog_handler.setFormatter(GraylogFormatter())
                self.logger.addHandler(graylog_handler)
            except Exception as e:
                # Fall back to just file logging if Graylog setup fails
                fallback_logger = logging.getLogger('eveai_app')
                fallback_logger.warning(f"Failed to set up Graylog handler: {str(e)}")

        # Set logger level and disable propagation
        self.logger.setLevel(logging.DEBUG)
        self.logger.propagate = False

    def log_tuning(self, tuning_type: str, message: str, data=None, level=logging.DEBUG):
        """Log a tuning event with structured data"""
        try:
            # Create a standard LogRecord for tuning
@@ -275,13 +201,82 @@ def log_tuning(self, tuning_type: str, message: str, data=None, level=logging.DE
            self.logger.handle(record)

        except Exception as e:
            fallback_logger = logging.getLogger('eveai_workers')
            fallback_logger.exception(f"Failed to log tuning message: {str(e)}")
            print(f"Failed to log tuning message: {str(e)}")


# Set the custom log record factory
logging.setLogRecordFactory(TuningLogRecord)
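For orientation, a hedged usage sketch for TuningLogger: the keyword arguments follow the constructor docstring above, but the full signature is not shown in this hunk, so treat the call shape as an assumption:

# Illustrative only: emitting one structured tuning event. Argument
# names follow the docstring above; values are made up for the example.
tuning = TuningLogger('tuning', tenant_id=42, catalog_id=7, session_id='abc123')
tuning.log_tuning('rag', 'retrieved 8 embeddings', data={'es_k': 8})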
def configure_logging():
    """Configure logging based on environment

    When running in Kubernetes, directs logs to stdout in JSON format
    Otherwise uses file-based logging for development/testing
    """
    try:
        # Get the absolute path to the logs directory
        base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
        logs_dir = os.path.join(base_dir, 'logs')

        # Make sure the logs directory exists with the correct permissions
        if not os.path.exists(logs_dir):
            try:
                os.makedirs(logs_dir, exist_ok=True)
                print(f"Logs directory created at: {logs_dir}")
            except (IOError, PermissionError) as e:
                print(f"WARNING: cannot create logs directory: {e}")
                print("Logs may not be written correctly!")

        # Check if running in Kubernetes
        in_kubernetes = os.environ.get('KUBERNETES_SERVICE_HOST') is not None

        # Check whether the pythonjsonlogger package is available when running in Kubernetes
        if in_kubernetes:
            try:
                import pythonjsonlogger.jsonlogger
                has_json_logger = True
            except ImportError:
                print("WARNING: the python-json-logger package is not installed.")
                print("Run 'pip install python-json-logger>=2.0.7' to enable JSON logging.")
                print("Falling back to the standard logging format.")
                has_json_logger = False
                in_kubernetes = False  # Fall back to standard logging
        else:
            has_json_logger = False

        # Apply the configuration
        logging_config = dict(LOGGING)

        # Change the json_console handler to fall back to console output if pythonjsonlogger is unavailable
        if not has_json_logger and 'json_console' in logging_config['handlers']:
            # Replace the json_console handler with a console handler using the standard formatter
            logging_config['handlers']['json_console']['formatter'] = 'standard'

        # In Kubernetes, conditionally modify specific loggers to use JSON console output
        # This preserves the same logger names but changes where/how they log
        if in_kubernetes:
            for logger_name in logging_config['loggers']:
                if logger_name:  # Skip the root logger
                    logging_config['loggers'][logger_name]['handlers'] = ['json_console']

        # Verify that the logs directory is writable before applying the configuration
        logs_dir = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs')
        if os.path.exists(logs_dir) and not os.access(logs_dir, os.W_OK):
            print(f"WARNING: logs directory exists but is not writable: {logs_dir}")
            print("Logs may not be written correctly!")

        logging.config.dictConfig(logging_config)
        logging.info(f"Logging configured. Environment: {'Kubernetes' if in_kubernetes else 'Development/Testing'}")
        logging.info(f"Logs directory: {logs_dir}")
    except Exception as e:
        print(f"Error configuring logging: {str(e)}")
        print("Detailed error information:")
        import traceback
        traceback.print_exc()
        # Fall back to basic configuration
        logging.basicConfig(level=logging.INFO)
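A short usage sketch, assuming configure_logging() is called once at process start before any module grabs a logger; the call site is illustrative:

# Illustrative startup sequence (assumption: called from the app factory).
configure_logging()
logger = logging.getLogger('eveai_app')
logger.info('application starting')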
LOGGING = {
    'version': 1,
@@ -290,7 +285,7 @@ LOGGING = {
        'file_app': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'logs/eveai_app.log',
            'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_app.log'),
            'maxBytes': 1024 * 1024 * 1,  # 1MB
            'backupCount': 2,
            'formatter': 'standard',
@@ -298,15 +293,15 @@ LOGGING = {
        'file_workers': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'logs/eveai_workers.log',
            'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_workers.log'),
            'maxBytes': 1024 * 1024 * 1,  # 1MB
            'backupCount': 2,
            'formatter': 'standard',
        },
        'file_chat': {
        'file_chat_client': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'logs/eveai_chat.log',
            'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_chat_client.log'),
            'maxBytes': 1024 * 1024 * 1,  # 1MB
            'backupCount': 2,
            'formatter': 'standard',
@@ -314,7 +309,7 @@ LOGGING = {
        'file_chat_workers': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'logs/eveai_chat_workers.log',
            'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_chat_workers.log'),
            'maxBytes': 1024 * 1024 * 1,  # 1MB
            'backupCount': 2,
            'formatter': 'standard',
@@ -322,7 +317,7 @@ LOGGING = {
        'file_api': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'logs/eveai_api.log',
            'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_api.log'),
            'maxBytes': 1024 * 1024 * 1,  # 1MB
            'backupCount': 2,
            'formatter': 'standard',
@@ -330,7 +325,7 @@ LOGGING = {
        'file_beat': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'logs/eveai_beat.log',
            'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_beat.log'),
            'maxBytes': 1024 * 1024 * 1,  # 1MB
            'backupCount': 2,
            'formatter': 'standard',
@@ -338,7 +333,7 @@ LOGGING = {
        'file_entitlements': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'logs/eveai_entitlements.log',
            'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'eveai_entitlements.log'),
            'maxBytes': 1024 * 1024 * 1,  # 1MB
            'backupCount': 2,
            'formatter': 'standard',
@@ -346,7 +341,7 @@ LOGGING = {
        'file_sqlalchemy': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'logs/sqlalchemy.log',
            'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'sqlalchemy.log'),
            'maxBytes': 1024 * 1024 * 1,  # 1MB
            'backupCount': 2,
            'formatter': 'standard',
@@ -354,7 +349,7 @@ LOGGING = {
        'file_security': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'logs/security.log',
            'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'security.log'),
            'maxBytes': 1024 * 1024 * 1,  # 1MB
            'backupCount': 2,
            'formatter': 'standard',
@@ -362,7 +357,7 @@ LOGGING = {
        'file_rag_tuning': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'logs/rag_tuning.log',
            'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'rag_tuning.log'),
            'maxBytes': 1024 * 1024 * 1,  # 1MB
            'backupCount': 2,
            'formatter': 'standard',
@@ -370,7 +365,7 @@ LOGGING = {
        'file_embed_tuning': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'logs/embed_tuning.log',
            'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'embed_tuning.log'),
            'maxBytes': 1024 * 1024 * 1,  # 1MB
            'backupCount': 2,
            'formatter': 'standard',
@@ -378,7 +373,7 @@ LOGGING = {
        'file_business_events': {
            'level': 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'logs/business_events.log',
            'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'business_events.log'),
            'maxBytes': 1024 * 1024 * 1,  # 1MB
            'backupCount': 2,
            'formatter': 'standard',
@@ -388,98 +383,102 @@ LOGGING = {
            'level': 'DEBUG',
            'formatter': 'standard',
        },
        'json_console': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'formatter': 'json',
            'stream': 'ext://sys.stdout',
        },
        'tuning_file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'logs/tuning.log',
            'filename': os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'logs', 'tuning.log'),
            'maxBytes': 1024 * 1024 * 3,  # 3MB
            'backupCount': 3,
            'formatter': 'tuning',
        },
        'graylog': {
            'level': 'DEBUG',
            'class': 'graypy.GELFUDPHandler',
            'host': GRAYLOG_HOST,
            'port': GRAYLOG_PORT,
            'debugging_fields': True,
            'formatter': 'graylog'
        },
    },
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s (%(component)s) [%(module)s:%(lineno)d]: %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
        'graylog': {
            'format': '[%(levelname)s] %(name)s (%(component)s) [%(module)s:%(lineno)d in %(funcName)s] '
                      '[Thread: %(threadName)s]: %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S',
            '()': GraylogFormatter
        },
        'tuning': {
            '()': TuningFormatter,
            'datefmt': '%Y-%m-%d %H:%M:%S UTC'
        },
        'json': {
            'format': '%(message)s',
            'class': 'logging.Formatter' if 'pythonjsonlogger' not in sys.modules else 'pythonjsonlogger.jsonlogger.JsonFormatter',
            'json_default': lambda obj: str(obj) if isinstance(obj, (dt, Exception)) else None,
            'json_ensure_ascii': False,
            'rename_fields': {
                'asctime': 'timestamp',
                'levelname': 'severity'
            },
            'timestamp': True,
            'datefmt': '%Y-%m-%dT%H:%M:%S.%fZ'
        }
    },
    'loggers': {
        'eveai_app': {  # logger for the eveai_app
            'handlers': ['file_app', 'graylog', ] if env == 'production' else ['file_app', ],
            'handlers': ['file_app'],
            'level': 'DEBUG',
            'propagate': False
        },
        'eveai_workers': {  # logger for the eveai_workers
            'handlers': ['file_workers', 'graylog', ] if env == 'production' else ['file_workers', ],
            'handlers': ['file_workers'],
            'level': 'DEBUG',
            'propagate': False
        },
        'eveai_chat': {  # logger for the eveai_chat
            'handlers': ['file_chat', 'graylog', ] if env == 'production' else ['file_chat', ],
        'eveai_chat_client': {  # logger for the eveai_chat
            'handlers': ['file_chat_client'],
            'level': 'DEBUG',
            'propagate': False
        },
        'eveai_chat_workers': {  # logger for the eveai_chat_workers
            'handlers': ['file_chat_workers', 'graylog', ] if env == 'production' else ['file_chat_workers', ],
            'handlers': ['file_chat_workers'],
            'level': 'DEBUG',
            'propagate': False
        },
        'eveai_api': {  # logger for the eveai_chat_workers
            'handlers': ['file_api', 'graylog', ] if env == 'production' else ['file_api', ],
        'eveai_api': {  # logger for the eveai_api
            'handlers': ['file_api'],
            'level': 'DEBUG',
            'propagate': False
        },
        'eveai_beat': {  # logger for the eveai_beat
            'handlers': ['file_beat', 'graylog', ] if env == 'production' else ['file_beat', ],
            'handlers': ['file_beat'],
            'level': 'DEBUG',
            'propagate': False
        },
        'eveai_entitlements': {  # logger for the eveai_entitlements
            'handlers': ['file_entitlements', 'graylog', ] if env == 'production' else ['file_entitlements', ],
            'handlers': ['file_entitlements'],
            'level': 'DEBUG',
            'propagate': False
        },
        'sqlalchemy.engine': {  # logger for the sqlalchemy
            'handlers': ['file_sqlalchemy', 'graylog', ] if env == 'production' else ['file_sqlalchemy', ],
            'handlers': ['file_sqlalchemy'],
            'level': 'DEBUG',
            'propagate': False
        },
        'security': {  # logger for the security
            'handlers': ['file_security', 'graylog', ] if env == 'production' else ['file_security', ],
            'handlers': ['file_security'],
            'level': 'DEBUG',
            'propagate': False
        },
        'business_events': {
            'handlers': ['file_business_events', 'graylog'],
            'handlers': ['file_business_events'],
            'level': 'DEBUG',
            'propagate': False
        },
        # Single tuning logger
        'tuning': {
            'handlers': ['tuning_file', 'graylog'] if env == 'production' else ['tuning_file'],
            'handlers': ['tuning_file'],
            'level': 'DEBUG',
            'propagate': False,
        },
        '': {  # root logger
            'handlers': ['console'],
            'handlers': ['console'] if os.environ.get('KUBERNETES_SERVICE_HOST') is None else ['json_console'],
            'level': 'WARNING',  # Set higher level for root to minimize noise
            'propagate': False
        },
@@ -0,0 +1,9 @@
version: "1.0.0"
name: "Knowledge Service"
configuration: {}
permissions: {}
metadata:
  author: "Josako"
  date_added: "2025-04-02"
  changes: "Initial version"
  description: "Partner providing catalog content"
@@ -0,0 +1,14 @@
version: "1.0.0"
name: "HTML Processor"
file_types: "html"
description: "A processor for HTML files, driven by AI"
configuration:
  custom_instructions:
    name: "Custom Instructions"
    description: "Custom instructions to guide our AI agent in parsing your HTML file"
    type: "text"
    required: false
metadata:
  author: "Josako"
  date_added: "2025-06-25"
  description: "A processor for HTML files, driven by AI"
@@ -42,7 +42,7 @@ configuration:
  image_handling:
    name: "Image Handling"
    type: "enum"
    description: "How to handle embedded images"
    description: "How to handle embedded img"
    required: false
    default: "skip"
    allowed_values: ["skip", "extract", "placeholder"]
30
config/prompts/globals/automagic_html_parse/1.0.0.yaml
Normal file
@@ -0,0 +1,30 @@
version: "1.0.0"
content: |
  You are a top administrative assistant specialized in transforming given HTML into markdown formatted files. The
  generated files will be used to generate embeddings in a RAG-system.

  # Best practices are:
  - Respect wordings and language(s) used in the HTML.
  - The following items need to be considered: headings, paragraphs, listed items (numbered or not) and tables. Images can be neglected.
  - Sub-headers can be used as lists. This is true when a header is followed by a series of sub-headers without content (paragraphs or listed items). Present those sub-headers as a list.
  - Be careful of encoding of the text. Everything needs to be human readable.

  You only return relevant information, and filter out non-relevant information, such as:
  - information found in menu bars, sidebars, footers or headers
  - information in forms, buttons

  Process the file or text carefully, and take a stepped approach. The resulting markdown should be the result of the
  processing of the complete input html file. Answer with the pure markdown, without any other text.

  {custom_instructions}

  HTML to be processed is in between triple backquotes.

  ```{html}```
llm_model: "mistral.mistral-small-latest"
metadata:
  author: "Josako"
  date_added: "2025-06-25"
  description: "An aid in transforming HTML-based inputs to markdown, fully automatic"
  changes: "Initial version"
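A small sketch of how a prompt definition like this might be loaded and its {custom_instructions} and {html} placeholders filled; the file path comes from the header above, but the loading code itself is an assumption (the project's real prompt loader is not part of this diff):

# Assumption: prompt 'content' is a plain str.format template.
import yaml

with open('config/prompts/globals/automagic_html_parse/1.0.0.yaml') as f:
    spec = yaml.safe_load(f)

prompt = spec['content'].format(
    custom_instructions='Keep tables as markdown tables.',
    html='<h1>Pricing</h1><p>Contact us for a quote.</p>',
)
print(spec['llm_model'], len(prompt))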
@@ -0,0 +1,22 @@
version: "1.0.0"
content: >
  Check if there are other elements available in the provided text (in between triple $) than answers to the
  following question (in between triple €):

  €€€
  {question}
  €€€

  Provided text:

  $$$
  {answer}
  $$$

  Answer with True or False, without additional information.
llm_model: "mistral.mistral-medium-latest"
metadata:
  author: "Josako"
  date_added: "2025-06-23"
  description: "An assistant to check whether the provided text contains elements other than answers to the question."
  changes: "Initial version"
17
config/prompts/globals/check_affirmative_answer/1.0.0.yaml
Normal file
@@ -0,0 +1,17 @@
version: "1.0.0"
content: >
  Determine if there is an affirmative answer to the following question (in between triple backquotes):

  ```{question}```

  in the provided answer (in between triple backquotes):

  ```{answer}```

  Answer with True or False, without additional information.
llm_model: "mistral.mistral-medium-latest"
metadata:
  author: "Josako"
  date_added: "2025-06-23"
  description: "An assistant to check if the answer to a question is affirmative."
  changes: "Initial version"
16
config/prompts/globals/get_answer_to_question/1.0.0.yaml
Normal file
@@ -0,0 +1,16 @@
version: "1.0.0"
content: >
  Provide us with the answer to the following question (in between triple backquotes) from the text provided to you:

  ```{question}```

  Reply using the exact wording and in the same language. If no answer can be found, reply with "No answer provided"

  Text provided to you:
  ```{answer}```
llm_model: "mistral.mistral-medium-latest"
metadata:
  author: "Josako"
  date_added: "2025-06-23"
  description: "An assistant to extract the answer to a question from a provided text."
  changes: "Initial version"
@@ -4,7 +4,7 @@ content: |
  question is understandable without that history. The conversation is a consequence of questions and context provided
  by the HUMAN, and the AI (you) answering back, in chronological order. The most recent (i.e. last) elements are the
  most important when detailing the question.
  You answer by stating the detailed question in {language}.
  You return only the detailed question in {language}, without any additional information.
  History:
  ```{history}```
  Question to be detailed:
25
config/prompts/globals/translation_with_context/1.0.0.yaml
Normal file
@@ -0,0 +1,25 @@
version: "1.0.0"
content: >
  You are a top translator. We need you to translate (in between triple quotes)

  '''{text_to_translate}'''

  into '{target_language}', taking
  into account this context:

  '{context}'

  These are best practices you should follow:

  - Do not translate text in between double square brackets, as these are names or terms that need to remain intact. Remove the square brackets in the translation!
  - We use inline tags (Custom HTML/XML-like tags). Ensure the tags themselves are not translated and remain intact in the translation. The text in between the tags should be translated. e.g. "<terms_and_conditions>Terms & Conditions</terms_and_conditions>" translates in Dutch to <terms_and_conditions>Gebruiksvoorwaarden</terms_and_conditions>
  - Remove the triple quotes in your translation!

  I only want you to return the translation. No explanation, no options. I need to be able to directly use your answer
  without further interpretation. If more than one option is available, present me with the most probable one.
llm_model: "mistral.mistral-medium-latest"
metadata:
  author: "Josako"
  date_added: "2025-06-23"
  description: "An assistant to translate given a context."
  changes: "Initial version"
@@ -0,0 +1,22 @@
version: "1.0.0"
content: >
  You are a top translator. We need you to translate (in between triple quotes)

  '''{text_to_translate}'''

  into '{target_language}'.

  These are best practices you should follow:

  - Do not translate text in between double square brackets, as these are names or terms that need to remain intact. Remove the square brackets in the translation!
  - We use inline tags (Custom HTML/XML-like tags). Ensure the tags themselves are not translated and remain intact in the translation. The text in between the tags should be translated. e.g. "<terms_and_conditions>Terms & Conditions</terms_and_conditions>" translates in Dutch to <terms_and_conditions>Gebruiksvoorwaarden</terms_and_conditions>
  - Remove the triple quotes in your translation!

  I only want you to return the translation. No explanation, no options. I need to be able to directly use your answer
  without further interpretation. If more than one option is available, present me with the most probable one.
llm_model: "mistral.mistral-medium-latest"
metadata:
  author: "Josako"
  date_added: "2025-06-23"
  description: "An assistant to translate without context."
  changes: "Initial version"
15
config/prompts/globals/user_action_classifier/1.0.0.yaml
Normal file
@@ -0,0 +1,15 @@
version: "1.0.0"
content: |
  Classify the prompt you receive from an end user, according to the following information:

  {user_action_classes}

  Use the CLASS DESCRIPTION to identify the CLASS of the question asked. Return the value of CLASS. If the prompt doesn't correspond to any CLASS DESCRIPTION, return NONE. No layout is required.

llm_model: "mistral.mistral-small-latest"
temperature: 0.7
metadata:
  author: "Josako"
  date_added: "2025-11-14"
  description: "Assistant to classify user intent"
  changes: "Initial version"
21
config/retrievers/evie_partner/PARTNER_RAG/1.0.0.yaml
Normal file
@@ -0,0 +1,21 @@
version: "1.0.0"
name: "Standard RAG Retriever"
configuration:
  es_k:
    name: "es_k"
    type: "integer"
    description: "K-value to retrieve embeddings (max embeddings retrieved)"
    required: true
    default: 8
  es_similarity_threshold:
    name: "es_similarity_threshold"
    type: "float"
    description: "Similarity threshold for retrieving embeddings"
    required: true
    default: 0.3
arguments: {}
metadata:
  author: "Josako"
  date_added: "2025-01-24"
  changes: "Initial version"
  description: "Retrieving all embeddings conforming to the query"
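The two knobs above translate directly into a top-k similarity search with a score cut-off. A hedged sketch of that retrieval step under those assumptions; the search function is a stand-in, not this project's API:

# Illustrative only: how es_k and es_similarity_threshold would typically
# bound a vector search. `search_embeddings` is a hypothetical callable.
def retrieve(query_vector, search_embeddings, es_k=8, es_similarity_threshold=0.3):
    # Ask the store for the es_k nearest embeddings...
    hits = search_embeddings(query_vector, top_k=es_k)
    # ...then drop anything below the similarity threshold.
    return [h for h in hits if h.score >= es_similarity_threshold]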
@@ -1,36 +0,0 @@
version: "1.0.0"
name: "DOSSIER Retriever"
configuration:
  es_k:
    name: "es_k"
    type: "int"
    description: "K-value to retrieve embeddings (max embeddings retrieved)"
    required: true
    default: 8
  es_similarity_threshold:
    name: "es_similarity_threshold"
    type: "float"
    description: "Similarity threshold for retrieving embeddings"
    required: true
    default: 0.3
  tagging_fields_filter:
    name: "Tagging Fields Filter"
    type: "tagging_fields_filter"
    description: "Filter JSON to retrieve a subset of documents"
    required: true
  dynamic_arguments:
    name: "Dynamic Arguments"
    type: "dynamic_arguments"
    description: "dynamic arguments used in the filter"
    required: false
arguments:
  query:
    name: "query"
    type: "str"
    description: "Query to retrieve embeddings"
    required: True
metadata:
  author: "Josako"
  date_added: "2025-03-11"
  changes: "Initial version"
  description: "Retrieving all embeddings conform the query and the tagging fields filter"
@@ -3,7 +3,7 @@ name: "Standard RAG Retriever"
configuration:
  es_k:
    name: "es_k"
    type: "int"
    type: "integer"
    description: "K-value to retrieve embeddings (max embeddings retrieved)"
    required: true
    default: 8
@@ -13,12 +13,7 @@ configuration:
    description: "Similarity threshold for retrieving embeddings"
    required: true
    default: 0.3
arguments:
  query:
    name: "query"
    type: "str"
    description: "Query to retrieve embeddings"
    required: True
arguments: {}
metadata:
  author: "Josako"
  date_added: "2025-01-24"
@@ -0,0 +1,26 @@
version: "1.0.0"
name: "Retrieves role information for a specific role"
configuration:
  es_k:
    name: "es_k"
    type: "integer"
    description: "K-value to retrieve embeddings (max embeddings retrieved)"
    required: true
    default: 8
  es_similarity_threshold:
    name: "es_similarity_threshold"
    type: "float"
    description: "Similarity threshold for retrieving embeddings"
    required: true
    default: 0.3
arguments:
  role_reference:
    name: "Role Reference"
    type: "string"
    description: "The role for which information needs to be retrieved"
    required: true
metadata:
  author: "Josako"
  date_added: "2025-07-07"
  changes: "Initial version"
  description: "Retrieves role information for a specific role"
@@ -0,0 +1,36 @@
type: "CONTACT_TIME_PREFERENCES_SIMPLE"
version: "1.0.0"
name: "Contact Time Preferences"
icon: "calendar_month"
fields:
  early:
    name: "Early in the morning"
    description: "Contact me early in the morning"
    type: "boolean"
    required: false
    # It is possible to also add a field 'context'. It allows you to provide an elaborate piece of information.
  late_morning:
    name: "During the morning"
    description: "Contact me during the morning"
    type: "boolean"
    required: false
  afternoon:
    name: "In the afternoon"
    description: "Contact me in the afternoon"
    type: "boolean"
    required: false
  evening:
    name: "In the evening"
    description: "Contact me in the evening"
    type: "boolean"
    required: false
  other:
    name: "Other"
    description: "Specify your preferred contact moment"
    type: "string"
    required: false
metadata:
  author: "Josako"
  date_added: "2025-07-22"
  changes: "Initial Version"
  description: "Simple Contact Time Preferences Form"
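These form definitions map each field 'type' onto a widget in the chat client. A purely illustrative sketch of how a client might walk the field definitions; the real rendering lives in eveai-chat-widget.js, which is not part of this excerpt:

# Hypothetical mapping from field 'type' to a UI widget kind.
WIDGETS = {'boolean': 'checkbox', 'string': 'text', 'str': 'text'}

def widgets_for(fields: dict) -> list:
    # Yields (field_key, widget_kind, required) tuples in definition order.
    return [(key, WIDGETS.get(spec.get('type'), 'text'), spec.get('required', False))
            for key, spec in fields.items()]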
@@ -0,0 +1,36 @@
type: "PERSONAL_CONTACT_FORM"
version: "1.0.0"
name: "Personal Contact Form"
icon: "person"
fields:
  name:
    name: "Name"
    description: "Your name"
    type: "str"
    required: true
    # It is possible to also add a field 'context'. It allows you to provide an elaborate piece of information.
  email:
    name: "Email"
    type: "str"
    description: "Your Email"
    required: true
  phone:
    name: "Phone Number"
    type: "str"
    description: "Your Phone Number"
    required: true
  consent:
    name: "Consent"
    type: "boolean"
    description: "Consent"
    required: true
    meta:
      kind: "consent"
      consentRich: "I Agree with the <terms>Terms and Conditions</terms> and the <dpa>Privacy Statement</dpa> of Ask Eve AI"
      ariaPrivacy: "Open the privacy statement in a modal dialog"
      ariaTerms: "Open the terms and conditions in a modal dialog"
metadata:
  author: "Josako"
  date_added: "2025-07-29"
  changes: "Initial Version"
  description: "Personal Contact Form"
@@ -0,0 +1,51 @@
type: "PERSONAL_CONTACT_FORM"
version: "1.0.0"
name: "Personal Contact Form"
icon: "person"
fields:
  name:
    name: "Name"
    description: "Your name"
    type: "str"
    required: true
    # It is possible to also add a field 'context'. It allows you to provide an elaborate piece of information.
  email:
    name: "Email"
    type: "str"
    description: "Your Email"
    required: true
  phone:
    name: "Phone Number"
    type: "str"
    description: "Your Phone Number"
    required: true
  address:
    name: "Address"
    type: "string"
    description: "Your Address"
    required: false
  zip:
    name: "Postal Code"
    type: "string"
    description: "Postal Code"
    required: false
  city:
    name: "City"
    type: "string"
    description: "City"
    required: false
  country:
    name: "Country"
    type: "string"
    description: "Country"
    required: false
  consent:
    name: "Consent"
    type: "boolean"
    description: "Consent"
    required: true
metadata:
  author: "Josako"
  date_added: "2025-06-18"
  changes: "Initial Version"
  description: "Personal Contact Form"
@@ -0,0 +1,50 @@
type: "PROFESSIONAL_CONTACT_FORM"
version: "1.0.0"
name: "Professional Contact Form"
icon: "account_circle"
fields:
  name:
    name: "Name"
    description: "Your name"
    type: "str"
    required: true
  email:
    name: "Email"
    type: "str"
    description: "Your Email"
    required: true
  phone:
    name: "Phone Number"
    type: "str"
    description: "Your Phone Number"
    required: true
  company:
    name: "Company Name"
    type: "str"
    description: "Company Name"
    required: true
  job_title:
    name: "Job Title"
    type: "str"
    description: "Job Title"
    required: false
  city:
    name: "City"
    type: "str"
    description: "City"
    required: false
  country:
    name: "Country"
    type: "str"
    description: "Country"
    required: false
  consent:
    name: "Consent"
    type: "bool"
    description: "Consent"
    required: true
metadata:
  author: "Josako"
  date_added: "2025-06-18"
  changes: "Initial Version"
  description: "Professional Contact Form"
@@ -0,0 +1,34 @@
version: "1.0.0"
name: "Partner RAG Specialist"
framework: "crewai"
chat: true
configuration: {}
arguments: {}
results:
  rag_output:
    answer:
      name: "answer"
      type: "str"
      description: "Answer to the query"
      required: true
    citations:
      name: "citations"
      type: "List[str]"
      description: "List of citations"
      required: false
    insufficient_info:
      name: "insufficient_info"
      type: "bool"
      description: "Whether or not there was insufficient info to answer the query"
      required: true
agents:
  - type: "PARTNER_RAG_AGENT"
    version: "1.0"
tasks:
  - type: "PARTNER_RAG_TASK"
    version: "1.0"
metadata:
  author: "Josako"
  date_added: "2025-07-16"
  changes: "Initial version"
  description: "Q&A through Partner RAG Specialist (for documentation purposes)"
@@ -19,11 +19,6 @@ arguments:
    type: "str"
    description: "Language code to be used for receiving questions and giving answers"
    required: true
  query:
    name: "query"
    type: "str"
    description: "Query or response to process"
    required: true
results:
  rag_output:
    answer:
49
config/specialists/globals/RAG_SPECIALIST/1.1.0.yaml
Normal file
@@ -0,0 +1,49 @@
version: "1.1.0"
name: "RAG Specialist"
framework: "crewai"
chat: true
configuration:
  name:
    name: "name"
    type: "str"
    description: "The name the specialist is called upon."
    required: true
  welcome_message:
    name: "Welcome Message"
    type: "string"
    description: "Welcome Message to be given to the end user"
    required: false
arguments:
  language:
    name: "Language"
    type: "str"
    description: "Language code to be used for receiving questions and giving answers"
    required: true
results:
  rag_output:
    answer:
      name: "answer"
      type: "str"
      description: "Answer to the query"
      required: true
    citations:
      name: "citations"
      type: "List[str]"
      description: "List of citations"
      required: false
    insufficient_info:
      name: "insufficient_info"
      type: "bool"
      description: "Whether or not there was insufficient info to answer the query"
      required: true
agents:
  - type: "RAG_AGENT"
    version: "1.1"
tasks:
  - type: "RAG_TASK"
    version: "1.1"
metadata:
  author: "Josako"
  date_added: "2025-01-08"
  changes: "Initial version"
  description: "A Specialist that performs Q&A activities"
81
config/specialists/globals/RAG_SPECIALIST/1.2.0.yaml
Normal file
@@ -0,0 +1,81 @@
version: "1.2.0"
name: "RAG Specialist"
framework: "crewai"
chat: true
configuration:
  name:
    name: "name"
    type: "str"
    description: "The name the specialist is called upon."
    required: true
  tone_of_voice:
    name: "Tone of Voice"
    description: "The tone of voice the specialist uses to communicate"
    type: "enum"
    allowed_values: ["Professional & Neutral", "Warm & Empathetic", "Energetic & Enthusiastic", "Accessible & Informal", "Expert & Trustworthy", "No-nonsense & Goal-driven"]
    default: "Professional & Neutral"
    required: true
  language_level:
    name: "Language Level"
    description: "Language level to be used when communicating, relating to CEFR levels"
    type: "enum"
    allowed_values: ["Basic", "Standard", "Professional"]
    default: "Standard"
    required: true
  response_depth:
    name: "Response Depth"
    description: "Response depth to be used when communicating"
    type: "enum"
    allowed_values: ["Concise", "Balanced", "Detailed"]
    default: "Balanced"
    required: true
  conversation_purpose:
    name: "Conversation Purpose"
    description: "Purpose of the conversation, resulting in communication style"
    type: "enum"
    allowed_values: ["Informative", "Persuasive", "Supportive", "Collaborative"]
    default: "Informative"
    required: true
  welcome_message:
    name: "Welcome Message"
    type: "string"
    description: "Welcome Message to be given to the end user"
    required: false
arguments:
  language:
    name: "Language"
    type: "str"
    description: "Language code to be used for receiving questions and giving answers"
    required: true
results:
  rag_output:
    answer:
      name: "answer"
      type: "str"
      description: "Answer to the query"
      required: true
    citations:
      name: "citations"
      type: "List[str]"
      description: "List of citations"
      required: false
    insufficient_info:
      name: "insufficient_info"
      type: "bool"
      description: "Whether or not there was insufficient info to answer the query"
      required: true
agents:
  - type: "RAG_AGENT"
    version: "1.2"
  - type: "RAG_PROOFREADER_AGENT"
    version: "1.0"
tasks:
  - type: "RAG_TASK"
    version: "1.1"
  - type: "RAG_PROOFREADING_TASK"
    version: "1.0"
metadata:
  author: "Josako"
  date_added: "2025-01-08"
  changes: "Initial version"
  description: "A Specialist that performs Q&A activities"
@@ -1,183 +0,0 @@
version: "1.0.0"
name: "Spin Sales Specialist"
framework: "crewai"
chat: true
configuration:
  name:
    name: "name"
    type: "str"
    description: "The name the specialist is called upon."
    required: true
  company:
    name: "company"
    type: "str"
    description: "The name of your company. If not provided, your tenant's name will be used."
    required: false
  products:
    name: "products"
    type: "List[str]"
    description: "The products or services you're providing"
    required: false
  product_information:
    name: "product_information"
    type: "text"
    description: "Information on the products you are selling, such as ICP (Ideal Customer Profile), Pitch, ..."
    required: false
  engagement_options:
    name: "engagement_options"
    type: "text"
    description: "Engagement options such as email, phone number, booking link, ..."
  tenant_language:
    name: "tenant_language"
    type: "str"
    description: "The language code used for internal information. If not provided, the tenant's default language will be used"
    required: false
  nr_of_questions:
    name: "nr_of_questions"
    type: "int"
    description: "The maximum number of questions to formulate extra questions"
    required: true
    default: 3
arguments:
  language:
    name: "Language"
    type: "str"
    description: "Language code to be used for receiving questions and giving answers"
    required: true
  query:
    name: "query"
    type: "str"
    description: "Query or response to process"
    required: true
  identification:
    name: "identification"
    type: "text"
    description: "Initial identification information when available"
    required: false
results:
  rag_output:
    answer:
      name: "answer"
      type: "str"
      description: "Answer to the query"
      required: true
    citations:
      name: "citations"
      type: "List[str]"
      description: "List of citations"
      required: false
    insufficient_info:
      name: "insufficient_info"
      type: "bool"
      description: "Whether or not the query is insufficient info"
      required: true
  spin:
    situation:
      name: "situation"
      type: "str"
      description: "A description of the customer's current situation / context"
      required: false
    problem:
      name: "problem"
      type: "str"
      description: "The current problems the customer is facing, for which he/she seeks a solution"
      required: false
    implication:
      name: "implication"
      type: "str"
      description: "A list of implications"
      required: false
    needs:
      name: "needs"
      type: "str"
      description: "A list of needs"
      required: false
    additional_info:
      name: "additional_info"
      type: "str"
      description: "Additional information that may be commercially interesting"
      required: false
  lead_info:
    lead_personal_info:
      name:
        name: "name"
        type: "str"
        description: "name of the lead"
        required: "true"
      job_title:
        name: "job_title"
        type: "str"
        description: "job title"
        required: false
      email:
        name: "email"
        type: "str"
        description: "lead email"
        required: "false"
      phone:
        name: "phone"
        type: "str"
        description: "lead phone"
        required: false
      additional_info:
        name: "additional_info"
        type: "str"
        description: "additional info on the lead"
        required: false
    lead_company_info:
      company_name:
        name: "company_name"
        type: "str"
        description: "Name of the lead company"
        required: false
      industry:
        name: "industry"
        type: "str"
        description: "The industry of the company"
        required: false
      company_size:
        name: "company_size"
        type: "int"
        description: "The size of the company"
        required: false
      company_website:
        name: "company_website"
        type: "str"
        description: "The main website for the company"
        required: false
      additional_info:
        name: "additional_info"
        type: "str"
        description: "Additional information that may be commercially interesting"
        required: false
agents:
  - type: "RAG_AGENT"
    version: "1.0"
  - type: "RAG_COMMUNICATION_AGENT"
    version: "1.0"
  - type: "SPIN_DETECTION_AGENT"
    version: "1.0"
  - type: "SPIN_SALES_SPECIALIST_AGENT"
    version: "1.0"
  - type: "IDENTIFICATION_AGENT"
    version: "1.0"
  - type: "RAG_COMMUNICATION_AGENT"
    version: "1.0"
tasks:
  - type: "RAG_TASK"
    version: "1.0"
  - type: "SPIN_DETECT_TASK"
    version: "1.0"
  - type: "SPIN_QUESTIONS_TASK"
    version: "1.0"
  - type: "IDENTIFICATION_DETECTION_TASK"
    version: "1.0"
  - type: "IDENTIFICATION_QUESTIONS_TASK"
    version: "1.0"
  - type: "RAG_CONSOLIDATION_TASK"
    version: "1.0"
metadata:
  author: "Josako"
  date_added: "2025-01-08"
  changes: "Initial version"
  description: "A Specialist that performs both Q&A as SPIN (Sales Process) activities"
File diff suppressed because one or more lines are too long
Image file removed (387 KiB)
@@ -1,53 +0,0 @@
version: 1.0.0
name: "Standard RAG Specialist"
framework: "langchain"
chat: true
configuration:
  specialist_context:
    name: "Specialist Context"
    type: "text"
    description: "The context to be used by the specialist."
    required: false
  temperature:
    name: "Temperature"
    type: "number"
    description: "The inference temperature to be used by the specialist."
    required: false
    default: 0.3
arguments:
  language:
    name: "Language"
    type: "str"
    description: "Language code to be used for receiving questions and giving answers"
    required: true
  query:
    name: "query"
    type: "str"
    description: "Query to answer"
    required: true
results:
  detailed_query:
    name: "detailed_query"
    type: "str"
    description: "The query detailed with the Chat Session History."
    required: true
  answer:
    name: "answer"
    type: "str"
    description: "Answer to the query"
    required: true
  citations:
    name: "citations"
    type: "List[str]"
    description: "List of citations"
    required: false
  insufficient_info:
    name: "insufficient_info"
    type: "bool"
    description: "Whether or not the query is insufficient info"
    required: true
metadata:
  author: "Josako"
  date_added: "2025-01-08"
  changes: "Initial version"
  description: "A Specialist that performs standard Q&A"
@@ -0,0 +1,29 @@
version: "1.1.0"
name: "Traicie KO Criteria Interview Definition Specialist"
framework: "crewai"
partner: "traicie"
chat: false
configuration:
arguments:
  specialist_id:
    name: "specialist_id"
    description: "ID of the specialist for which to define KO Criteria Questions and Answers"
    type: "integer"
    required: true
results:
  asset_id:
    name: "asset_id"
    description: "ID of the Asset containing questions and answers for each of the defined KO Criteria"
    type: "integer"
    required: true
agents:
  - type: "TRAICIE_RECRUITER_AGENT"
    version: "1.0"
tasks:
  - type: "TRAICIE_KO_CRITERIA_INTERVIEW_DEFINITION_TASK"
    version: "1.0"
metadata:
  author: "Josako"
  date_added: "2025-07-01"
  changes: "Initial Version"
  description: "Specialist assisting in questions and answers definition for KO Criteria"
Some files were not shown because too many files have changed in this diff