- Improvements to audio processing to limit CPU and memory usage

- Removed Portkey from the equation and defined explicit monitoring using LangChain-native code
- Optimized Business Event logging
Josako
2024-10-02 14:11:46 +02:00
parent 883175b8f5
commit b700cfac64
13 changed files with 450 additions and 228 deletions

View File

@@ -0,0 +1,49 @@
import time
from langchain.callbacks.base import BaseCallbackHandler
from typing import Dict, Any, List
from langchain.schema import LLMResult
from common.utils.business_event_context import current_event
from flask import current_app
class LLMMetricsHandler(BaseCallbackHandler):
def __init__(self):
self.total_tokens: int = 0
self.prompt_tokens: int = 0
self.completion_tokens: int = 0
self.start_time: float = 0
self.end_time: float = 0
self.total_time: float = 0
def reset(self):
self.total_tokens = 0
self.prompt_tokens = 0
self.completion_tokens = 0
self.start_time = 0
self.end_time = 0
self.total_time = 0
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
self.start_time = time.time()
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
self.end_time = time.time()
self.total_time = self.end_time - self.start_time
usage = (response.llm_output or {}).get('token_usage', {})  # llm_output may be None for some providers or streaming runs
self.prompt_tokens += usage.get('prompt_tokens', 0)
self.completion_tokens += usage.get('completion_tokens', 0)
self.total_tokens = self.prompt_tokens + self.completion_tokens
metrics = self.get_metrics()
current_event.log_llm_metrics(metrics)
self.reset() # Reset for the next call
def get_metrics(self) -> Dict[str, int | float]:
return {
'total_tokens': self.total_tokens,
'prompt_tokens': self.prompt_tokens,
'completion_tokens': self.completion_tokens,
'time_elapsed': self.total_time,
'interaction_type': 'LLM',
}
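For reference, a minimal usage sketch (the model name and prompt are illustrative, not part of this commit): the handler is registered via callbacks so on_llm_start / on_llm_end wrap every model call and the aggregated usage lands on the current business event.

from langchain_openai import ChatOpenAI
from common.langchain.llm_metrics_handler import LLMMetricsHandler

handler = LLMMetricsHandler()
llm = ChatOpenAI(model='gpt-4o-mini', callbacks=[handler])
llm.invoke('Ping')  # on_llm_end reads token_usage, logs via current_event, then resets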

View File

@@ -0,0 +1,51 @@
from langchain_openai import OpenAIEmbeddings
from typing import List
import time
import tiktoken  # used below to estimate token counts locally
from common.utils.business_event_context import current_event
class TrackedOpenAIEmbeddings(OpenAIEmbeddings):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def embed_documents(self, texts: List[str]) -> List[List[float]]:
start_time = time.time()
result = super().embed_documents(texts)
end_time = time.time()
# Estimate token usage locally with tiktoken: embed_documents returns only the
# vectors, so no usage block is available here. Counts may differ slightly
# from what OpenAI bills.
enc = tiktoken.encoding_for_model(self.model)
total_tokens = sum(len(enc.encode(text)) for text in texts)
metrics = {
'total_tokens': total_tokens,
'prompt_tokens': total_tokens, # For embeddings, all tokens are prompt tokens
'completion_tokens': 0,
'time_elapsed': end_time - start_time,
'interaction_type': 'Embedding',
}
current_event.log_llm_metrics(metrics)
return result
def embed_query(self, text: str) -> List[float]:
start_time = time.time()
result = super().embed_query(text)
end_time = time.time()
# Estimate token usage locally (same approach as embed_documents)
enc = tiktoken.encoding_for_model(self.model)
total_tokens = len(enc.encode(text))
metrics = {
'total_tokens': total_tokens,
'prompt_tokens': total_tokens,
'completion_tokens': 0,
'time_elapsed': end_time - start_time,
'interaction_type': 'Embedding',
}
current_event.log_llm_metrics(metrics)
return result
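Aside: the local estimate can be sanity-checked in isolation; the model name and sample chunks below are illustrative. tiktoken approximates, but may not exactly match, billed usage.

import tiktoken

enc = tiktoken.encoding_for_model('text-embedding-3-small')
chunks = ['first document chunk', 'second document chunk']
print(sum(len(enc.encode(c)) for c in chunks))  # estimated prompt tokens for embed_documents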

View File

@@ -0,0 +1,27 @@
import time
from common.utils.business_event_context import current_event
def tracked_transcribe(client, *args, **kwargs):
start_time = time.time()
# Whisper is billed per second of audio rather than per token, so callers
# pass the clip duration (in seconds) alongside the normal API kwargs.
# Pop it so it is not forwarded to the OpenAI client; default to 600s.
duration = kwargs.pop('duration', 600)
result = client.audio.transcriptions.create(*args, **kwargs)
end_time = time.time()
# Report the transcribed duration (seconds) as the token count, since the whisper model is priced per second of audio
metrics = {
'total_tokens': duration,
'prompt_tokens': 0, # For transcriptions, all tokens are considered "completion"
'completion_tokens': duration,
'time_elapsed': end_time - start_time,
'interaction_type': 'ASR',
}
current_event.log_llm_metrics(metrics)
return result
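A usage sketch (file name and duration are illustrative): the duration kwarg is consumed by the wrapper and never reaches client.audio.transcriptions.create.

from openai import OpenAI

client = OpenAI()
with open('meeting.mp3', 'rb') as audio:
    result = tracked_transcribe(client, file=audio, model='whisper-1', duration=245)
print(result.text)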

View File

@@ -17,5 +17,11 @@ class BusinessEventLog(db.Model):
chat_session_id = db.Column(db.String(50))
interaction_id = db.Column(db.Integer)
environment = db.Column(db.String(20))
llm_metrics_total_tokens = db.Column(db.Integer)
llm_metrics_prompt_tokens = db.Column(db.Integer)
llm_metrics_completion_tokens = db.Column(db.Integer)
llm_metrics_total_time = db.Column(db.Float)
llm_metrics_call_count = db.Column(db.Integer)
llm_interaction_type = db.Column(db.String(20))
message = db.Column(db.Text)
# Add any other fields relevant for invoicing or warnings
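The new columns imply a schema migration; a sketch of the corresponding Alembic upgrade, assuming the table is named business_event_log and Alembic is the migration tool (neither is shown in this commit):

from alembic import op
import sqlalchemy as sa

def upgrade():
    op.add_column('business_event_log', sa.Column('llm_metrics_total_tokens', sa.Integer()))
    op.add_column('business_event_log', sa.Column('llm_metrics_prompt_tokens', sa.Integer()))
    op.add_column('business_event_log', sa.Column('llm_metrics_completion_tokens', sa.Integer()))
    op.add_column('business_event_log', sa.Column('llm_metrics_total_time', sa.Float()))
    op.add_column('business_event_log', sa.Column('llm_metrics_call_count', sa.Integer()))
    op.add_column('business_event_log', sa.Column('llm_interaction_type', sa.String(20)))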

View File

@@ -30,6 +30,14 @@ class BusinessEvent:
self.environment = os.environ.get("FLASK_ENV", "development")
self.span_counter = 0
self.spans = []
self.llm_metrics = {
'total_tokens': 0,
'prompt_tokens': 0,
'completion_tokens': 0,
'total_time': 0,
'call_count': 0,
'interaction_type': None
}
def update_attribute(self, attribute: str, value):
if hasattr(self, attribute):
@@ -37,6 +45,22 @@ class BusinessEvent:
else:
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{attribute}'")
def update_llm_metrics(self, metrics: dict):
self.llm_metrics['total_tokens'] += metrics['total_tokens']
self.llm_metrics['prompt_tokens'] += metrics['prompt_tokens']
self.llm_metrics['completion_tokens'] += metrics['completion_tokens']
self.llm_metrics['total_time'] += metrics['time_elapsed']
self.llm_metrics['call_count'] += 1
self.llm_metrics['interaction_type'] = metrics['interaction_type']  # last call wins: a span mixing LLM and Embedding calls reports the most recent type
def reset_llm_metrics(self):
self.llm_metrics['total_tokens'] = 0
self.llm_metrics['prompt_tokens'] = 0
self.llm_metrics['completion_tokens'] = 0
self.llm_metrics['total_time'] = 0
self.llm_metrics['call_count'] = 0
self.llm_metrics['interaction_type'] = None
@contextmanager
def create_span(self, span_name: str):
# The create_span method is designed to be used as a context manager. We want to perform some actions when
@@ -61,6 +85,9 @@ class BusinessEvent:
try:
yield
finally:
if self.llm_metrics['call_count'] > 0:
self.log_final_metrics()
self.reset_llm_metrics()
self.log(f"Ending span {span_name}")
# Restore the previous span info
if self.spans:
@@ -82,7 +109,7 @@ class BusinessEvent:
'document_version_id': self.document_version_id,
'chat_session_id': self.chat_session_id,
'interaction_id': self.interaction_id,
-'environment': self.environment
+'environment': self.environment,
}
# log to Graylog
getattr(logger, level)(message, extra=log_data)
@@ -105,10 +132,108 @@ class BusinessEvent:
db.session.add(event_log)
db.session.commit()
def log_llm_metrics(self, metrics: dict, level: str = 'info'):
self.update_llm_metrics(metrics)
message = "LLM Metrics"
logger = logging.getLogger('business_events')
log_data = {
'event_type': self.event_type,
'tenant_id': self.tenant_id,
'trace_id': self.trace_id,
'span_id': self.span_id,
'span_name': self.span_name,
'parent_span_id': self.parent_span_id,
'document_version_id': self.document_version_id,
'chat_session_id': self.chat_session_id,
'interaction_id': self.interaction_id,
'environment': self.environment,
'llm_metrics_total_tokens': metrics['total_tokens'],
'llm_metrics_prompt_tokens': metrics['prompt_tokens'],
'llm_metrics_completion_tokens': metrics['completion_tokens'],
'llm_metrics_total_time': metrics['time_elapsed'],
'llm_interaction_type': metrics['interaction_type'],
}
# log to Graylog
getattr(logger, level)(message, extra=log_data)
# Log to database
event_log = BusinessEventLog(
timestamp=dt.now(tz=tz.utc),
event_type=self.event_type,
tenant_id=self.tenant_id,
trace_id=self.trace_id,
span_id=self.span_id,
span_name=self.span_name,
parent_span_id=self.parent_span_id,
document_version_id=self.document_version_id,
chat_session_id=self.chat_session_id,
interaction_id=self.interaction_id,
environment=self.environment,
llm_metrics_total_tokens=metrics['total_tokens'],
llm_metrics_prompt_tokens=metrics['prompt_tokens'],
llm_metrics_completion_tokens=metrics['completion_tokens'],
llm_metrics_total_time=metrics['time_elapsed'],
llm_interaction_type=metrics['interaction_type'],
message=message
)
db.session.add(event_log)
db.session.commit()
def log_final_metrics(self, level: str = 'info'):
logger = logging.getLogger('business_events')
message = "Final LLM Metrics"
log_data = {
'event_type': self.event_type,
'tenant_id': self.tenant_id,
'trace_id': self.trace_id,
'span_id': self.span_id,
'span_name': self.span_name,
'parent_span_id': self.parent_span_id,
'document_version_id': self.document_version_id,
'chat_session_id': self.chat_session_id,
'interaction_id': self.interaction_id,
'environment': self.environment,
'llm_metrics_total_tokens': self.llm_metrics['total_tokens'],
'llm_metrics_prompt_tokens': self.llm_metrics['prompt_tokens'],
'llm_metrics_completion_tokens': self.llm_metrics['completion_tokens'],
'llm_metrics_total_time': self.llm_metrics['total_time'],
'llm_metrics_call_count': self.llm_metrics['call_count'],
'llm_interaction_type': self.llm_metrics['interaction_type'],
}
# log to Graylog
getattr(logger, level)(message, extra=log_data)
# Log to database
event_log = BusinessEventLog(
timestamp=dt.now(tz=tz.utc),
event_type=self.event_type,
tenant_id=self.tenant_id,
trace_id=self.trace_id,
span_id=self.span_id,
span_name=self.span_name,
parent_span_id=self.parent_span_id,
document_version_id=self.document_version_id,
chat_session_id=self.chat_session_id,
interaction_id=self.interaction_id,
environment=self.environment,
llm_metrics_total_tokens=self.llm_metrics['total_tokens'],
llm_metrics_prompt_tokens=self.llm_metrics['prompt_tokens'],
llm_metrics_completion_tokens=self.llm_metrics['completion_tokens'],
llm_metrics_total_time=self.llm_metrics['total_time'],
llm_metrics_call_count=self.llm_metrics['call_count'],
llm_interaction_type=self.llm_metrics['interaction_type'],
message=message
)
db.session.add(event_log)
db.session.commit()
def __enter__(self):
self.log(f'Starting Trace for {self.event_type}')
return BusinessEventContext(self).__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.llm_metrics['call_count'] > 0:
self.log_final_metrics()
self.reset_llm_metrics()
self.log(f'Ending Trace for {self.event_type}')
return BusinessEventContext(self).__exit__(exc_type, exc_val, exc_tb)
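How the pieces fit together, as a sketch (the BusinessEvent constructor arguments are assumed, since they are not shown in this diff): per-call metrics accumulate on the event while spans run, and any span or trace that saw at least one call flushes a "Final LLM Metrics" row on exit.

with BusinessEvent(event_type='chat_answer') as event:  # hypothetical constructor args
    with event.create_span('retrieve_and_answer'):
        ...  # LLM and embedding calls report via current_event.log_llm_metrics(...)
    # span exit: call_count > 0, so log_final_metrics() runs and the counters reset
# trace exit: metrics logged outside any span are flushed the same way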

View File

@@ -11,6 +11,9 @@ from openai import OpenAI
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL
from portkey_ai.langchain.portkey_langchain_callback_handler import LangchainCallbackHandler
from common.langchain.llm_metrics_handler import LLMMetricsHandler
from common.langchain.tracked_openai_embeddings import TrackedOpenAIEmbeddings
from common.langchain.tracked_transcribe import tracked_transcribe
from common.models.document import EmbeddingSmallOpenAI, EmbeddingLargeOpenAI
from common.models.user import Tenant
from config.model_config import MODEL_CONFIG
@@ -48,6 +51,8 @@ class ModelVariables(MutableMapping):
self._transcription_client = None
self._prompt_templates = {}
self._embedding_db_model = None
self.llm_metrics_handler = LLMMetricsHandler()
self._transcription_client = None
def _initialize_variables(self):
variables = {}
@@ -89,26 +94,20 @@ class ModelVariables(MutableMapping):
if variables['tool_calling_supported']:
variables['cited_answer_cls'] = CitedAnswer
variables['max_compression_duration'] = current_app.config['MAX_COMPRESSION_DURATION']
variables['max_transcription_duration'] = current_app.config['MAX_TRANSCRIPTION_DURATION']
variables['compression_cpu_limit'] = current_app.config['COMPRESSION_CPU_LIMIT']
variables['compression_process_delay'] = current_app.config['COMPRESSION_PROCESS_DELAY']
return variables
@property
def embedding_model(self):
-portkey_metadata = self.get_portkey_metadata()
-portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'),
-provider=self._variables['embedding_provider'],
-metadata=portkey_metadata,
-trace_id=current_event.trace_id,
-span_id=current_event.span_id,
-span_name=current_event.span_name,
-parent_span_id=current_event.parent_span_id
-)
api_key = os.getenv('OPENAI_API_KEY')
model = self._variables['embedding_model']
-self._embedding_model = OpenAIEmbeddings(api_key=api_key,
-model=model,
-base_url=PORTKEY_GATEWAY_URL,
-default_headers=portkey_headers)
+self._embedding_model = TrackedOpenAIEmbeddings(api_key=api_key,
+model=model)
self._embedding_db_model = EmbeddingSmallOpenAI \
if model == 'text-embedding-3-small' \
else EmbeddingLargeOpenAI
@@ -117,108 +116,40 @@ class ModelVariables(MutableMapping):
@property
def llm(self):
-portkey_headers = self.get_portkey_headers_for_llm()
api_key = self.get_api_key_for_llm()
self._llm = ChatOpenAI(api_key=api_key,
model=self._variables['llm_model'],
temperature=self._variables['RAG_temperature'],
-base_url=PORTKEY_GATEWAY_URL,
-default_headers=portkey_headers)
+callbacks=[self.llm_metrics_handler])
return self._llm
@property
def llm_no_rag(self):
-portkey_headers = self.get_portkey_headers_for_llm()
api_key = self.get_api_key_for_llm()
self._llm_no_rag = ChatOpenAI(api_key=api_key,
model=self._variables['llm_model'],
temperature=self._variables['RAG_temperature'],
-base_url=PORTKEY_GATEWAY_URL,
-default_headers=portkey_headers)
+callbacks=[self.llm_metrics_handler])
return self._llm_no_rag
-def get_portkey_headers_for_llm(self):
-portkey_metadata = self.get_portkey_metadata()
-portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'),
-metadata=portkey_metadata,
-provider=self._variables['llm_provider'],
-trace_id=current_event.trace_id,
-span_id=current_event.span_id,
-span_name=current_event.span_name,
-parent_span_id=current_event.parent_span_id
-)
-return portkey_headers
-def get_portkey_metadata(self):
-environment = os.getenv('FLASK_ENV', 'development')
-portkey_metadata = {'tenant_id': str(self.tenant.id),
-'environment': environment,
-'trace_id': current_event.trace_id,
-'span_id': current_event.span_id,
-'span_name': current_event.span_name,
-'parent_span_id': current_event.parent_span_id,
-}
-return portkey_metadata
def get_api_key_for_llm(self):
if self._variables['llm_provider'] == 'openai':
api_key = os.getenv('OPENAI_API_KEY')
-else: # self._variables['llm_provider'] == 'anthropic'
+else:  # self._variables['llm_provider'] == 'anthropic'
api_key = os.getenv('ANTHROPIC_API_KEY')
return api_key
-# def _initialize_llm(self):
-#
-#
-# if self._variables['llm_provider'] == 'openai':
-# portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'),
-# metadata=portkey_metadata,
-# provider='openai')
-#
-# self._llm = ChatOpenAI(api_key=api_key,
-# model=self._variables['llm_model'],
-# temperature=self._variables['RAG_temperature'],
-# base_url=PORTKEY_GATEWAY_URL,
-# default_headers=portkey_headers)
-# self._llm_no_rag = ChatOpenAI(api_key=api_key,
-# model=self._variables['llm_model'],
-# temperature=self._variables['no_RAG_temperature'],
-# base_url=PORTKEY_GATEWAY_URL,
-# default_headers=portkey_headers)
-# self._variables['tool_calling_supported'] = self._variables['llm_model'] in ['gpt-4o', 'gpt-4o-mini']
-# elif self._variables['llm_provider'] == 'anthropic':
-# api_key = os.getenv('ANTHROPIC_API_KEY')
-# llm_model_ext = os.getenv('ANTHROPIC_LLM_VERSIONS', {}).get(self._variables['llm_model'])
-# self._llm = ChatAnthropic(api_key=api_key,
-# model=llm_model_ext,
-# temperature=self._variables['RAG_temperature'])
-# self._llm_no_rag = ChatAnthropic(api_key=api_key,
-# model=llm_model_ext,
-# temperature=self._variables['RAG_temperature'])
-# self._variables['tool_calling_supported'] = True
-# else:
-# raise ValueError(f"Invalid chat provider: {self._variables['llm_provider']}")
@property
def transcription_client(self):
-environment = os.getenv('FLASK_ENV', 'development')
-portkey_metadata = self.get_portkey_metadata()
-portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'),
-metadata=portkey_metadata,
-provider='openai',
-trace_id=current_event.trace_id,
-span_id=current_event.span_id,
-span_name=current_event.span_name,
-parent_span_id=current_event.parent_span_id
-)
api_key = os.getenv('OPENAI_API_KEY')
-self._transcription_client = OpenAI(api_key=api_key,
-base_url=PORTKEY_GATEWAY_URL,
-default_headers=portkey_headers)
+self._transcription_client = OpenAI(api_key=api_key)
self._variables['transcription_model'] = 'whisper-1'
return self._transcription_client
def transcribe(self, *args, **kwargs):
return tracked_transcribe(self._transcription_client, *args, **kwargs)
@property
def embedding_db_model(self):
if self._embedding_db_model is None:

View File

@@ -1,99 +0,0 @@
import requests
import json
from typing import Optional
# Define a function to make the GET request
def get_metadata_grouped_data(
api_key: str,
metadata_key: str,
time_of_generation_min: Optional[str] = None,
time_of_generation_max: Optional[str] = None,
total_units_min: Optional[int] = None,
total_units_max: Optional[int] = None,
cost_min: Optional[float] = None,
cost_max: Optional[float] = None,
prompt_token_min: Optional[int] = None,
prompt_token_max: Optional[int] = None,
completion_token_min: Optional[int] = None,
completion_token_max: Optional[int] = None,
status_code: Optional[str] = None,
weighted_feedback_min: Optional[float] = None,
weighted_feedback_max: Optional[float] = None,
virtual_keys: Optional[str] = None,
configs: Optional[str] = None,
workspace_slug: Optional[str] = None,
api_key_ids: Optional[str] = None,
current_page: Optional[int] = 1,
page_size: Optional[int] = 20,
metadata: Optional[str] = None,
ai_org_model: Optional[str] = None,
trace_id: Optional[str] = None,
span_id: Optional[str] = None,
):
url = f"https://api.portkey.ai/v1/analytics/groups/metadata/{metadata_key}"
# Set up query parameters
params = {
"time_of_generation_min": time_of_generation_min,
"time_of_generation_max": time_of_generation_max,
"total_units_min": total_units_min,
"total_units_max": total_units_max,
"cost_min": cost_min,
"cost_max": cost_max,
"prompt_token_min": prompt_token_min,
"prompt_token_max": prompt_token_max,
"completion_token_min": completion_token_min,
"completion_token_max": completion_token_max,
"status_code": status_code,
"weighted_feedback_min": weighted_feedback_min,
"weighted_feedback_max": weighted_feedback_max,
"virtual_keys": virtual_keys,
"configs": configs,
"workspace_slug": workspace_slug,
"api_key_ids": api_key_ids,
"current_page": current_page,
"page_size": page_size,
"metadata": metadata,
"ai_org_model": ai_org_model,
"trace_id": trace_id,
"span_id": span_id,
}
# Remove any keys with None values
params = {k: v for k, v in params.items() if v is not None}
# Set up the headers
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json"
}
# Make the GET request
response = requests.get(url, headers=headers, params=params)
# Check for successful response
if response.status_code == 200:
return response.json() # Return JSON data
else:
response.raise_for_status() # Raise an exception for errors
# Example usage
# Replace 'your_api_key' and 'your_metadata_key' with actual values
api_key = 'your_api_key'
metadata_key = 'your_metadata_key'
try:
data = get_metadata_grouped_data(
api_key=api_key,
metadata_key=metadata_key,
time_of_generation_min="2024-08-23T15:50:23+05:30",
time_of_generation_max="2024-09-23T15:50:23+05:30",
total_units_min=100,
total_units_max=1000,
cost_min=10,
cost_max=100,
status_code="200,201"
)
print(json.dumps(data, indent=4))
except Exception as e:
print(f"Error occurred: {str(e)}")