Business event tracing completed for both eveai_workers tasks and eveai_chat_workers tasks
@@ -46,7 +46,7 @@ class BusinessEvent:
         parent_span_id = self.span_id
         self.span_counter += 1
-        new_span_id = f"{self.trace_id}-{self.span_counter}"
+        new_span_id = str(uuid.uuid4())

         # Save the current span info
         self.spans.append((self.span_id, self.span_name, self.parent_span_id))
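This hunk swaps counter-derived span ids for random UUIDs. A runnable sketch of the two schemes (variable names here are illustrative, not from the codebase):

import uuid

trace_id = str(uuid.uuid4())  # stand-in trace id
span_counter = 1

# Old scheme: deterministic, but ids collide when two workers share a trace
# and each keeps its own counter.
old_span_id = f"{trace_id}-{span_counter}"

# New scheme: globally unique regardless of process or counter state.
new_span_id = str(uuid.uuid4())
print(old_span_id, new_span_id)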
@@ -56,9 +56,12 @@ class BusinessEvent:
         self.span_name = span_name
         self.parent_span_id = parent_span_id

+        self.log(f"Starting span {span_name}")
+
         try:
             yield
         finally:
+            self.log(f"Ending span {span_name}")
             # Restore the previous span info
             if self.spans:
                 self.span_id, self.span_name, self.parent_span_id = self.spans.pop()
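For orientation, a self-contained sketch of the push/pop span pattern this context manager implements (the class and method names below are stand-ins, not the real API):

import uuid
from contextlib import contextmanager

class SpanStack:
    def __init__(self):
        self.trace_id = str(uuid.uuid4())
        self.span_id = self.span_name = self.parent_span_id = None
        self.spans = []  # saved (span_id, span_name, parent_span_id) tuples

    @contextmanager
    def start_span(self, span_name):
        # Save the current span and make it the parent of the new one.
        self.spans.append((self.span_id, self.span_name, self.parent_span_id))
        self.parent_span_id = self.span_id
        self.span_id, self.span_name = str(uuid.uuid4()), span_name
        try:
            yield
        finally:
            # Restore the previous span on exit, even after an exception.
            self.span_id, self.span_name, self.parent_span_id = self.spans.pop()

stack = SpanStack()
with stack.start_span("outer"):
    with stack.start_span("inner"):
        print(stack.parent_span_id is not None)  # True: inner's parent is outer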
@@ -103,7 +106,9 @@ class BusinessEvent:
         db.session.commit()

     def __enter__(self):
+        self.log(f'Starting Trace for {self.event_type}')
         return BusinessEventContext(self).__enter__()

     def __exit__(self, exc_type, exc_val, exc_tb):
+        self.log(f'Ending Trace for {self.event_type}')
         return BusinessEventContext(self).__exit__(exc_type, exc_val, exc_tb)
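The __enter__/__exit__ pair delegates to a separate BusinessEventContext object. A self-contained stand-in showing the delegation shape (Ctx and Event below are illustrative, not the real classes):

class Ctx:
    def __init__(self, event):
        self.event = event
    def __enter__(self):
        return self.event
    def __exit__(self, exc_type, exc_val, exc_tb):
        return False  # do not swallow exceptions

class Event:
    event_type = "demo"
    def log(self, msg):
        print(msg)
    def __enter__(self):
        self.log(f"Starting Trace for {self.event_type}")
        return Ctx(self).__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.log(f"Ending Trace for {self.event_type}")
        return Ctx(self).__exit__(exc_type, exc_val, exc_tb)

with Event() as evt:
    print(type(evt).__name__)  # Event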
@@ -9,10 +9,12 @@ from typing import List, Any, Iterator
 from collections.abc import MutableMapping
 from openai import OpenAI
 from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL
+from portkey_ai.langchain.portkey_langchain_callback_handler import LangchainCallbackHandler

 from common.models.document import EmbeddingSmallOpenAI, EmbeddingLargeOpenAI
 from common.models.user import Tenant
 from config.model_config import MODEL_CONFIG
+from common.utils.business_event_context import current_event


 class CitedAnswer(BaseModel):
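The new current_event import suggests a context-local proxy to the active BusinessEvent. A minimal sketch of how such a proxy is often built with contextvars; this is an assumption, since common/utils/business_event_context.py is not part of this diff:

import contextvars

# Context-local slot for the active event; each task sees its own value.
_current_event = contextvars.ContextVar("current_event", default=None)

class _EventProxy:
    def __getattr__(self, name):
        event = _current_event.get()
        if event is None:
            raise RuntimeError("no active BusinessEvent")
        return getattr(event, name)

current_event = _EventProxy()

class DemoEvent:  # illustrative stand-in
    trace_id, span_id, span_name, parent_span_id = "t-1", "s-1", "root", None

token = _current_event.set(DemoEvent())
print(current_event.trace_id)  # t-1
_current_event.reset(token)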
@@ -91,87 +93,115 @@ class ModelVariables(MutableMapping):

     @property
     def embedding_model(self):
         if self._embedding_model is None:
-            environment = os.getenv('FLASK_ENV', 'development')
-            portkey_metadata = {'tenant_id': str(self.tenant.id), 'environment': environment}
+            portkey_metadata = self.get_portkey_metadata()

-            if self._variables['embedding_provider'] == 'openai':
-                portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'),
-                                                provider='openai',
-                                                metadata=portkey_metadata)
-                api_key = os.getenv('OPENAI_API_KEY')
-                model = self._variables['embedding_model']
-                self._embedding_model = OpenAIEmbeddings(api_key=api_key,
-                                                         model=model,
-                                                         base_url=PORTKEY_GATEWAY_URL,
-                                                         default_headers=portkey_headers)
-                self._embedding_db_model = EmbeddingSmallOpenAI \
-                    if model == 'text-embedding-3-small' \
-                    else EmbeddingLargeOpenAI
-            else:
-                raise ValueError(f"Invalid embedding provider: {self._variables['embedding_provider']}")
+            portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'),
+                                            provider=self._variables['embedding_provider'],
+                                            metadata=portkey_metadata)
+            api_key = os.getenv('OPENAI_API_KEY')
+            model = self._variables['embedding_model']
+            self._embedding_model = OpenAIEmbeddings(api_key=api_key,
+                                                     model=model,
+                                                     base_url=PORTKEY_GATEWAY_URL,
+                                                     default_headers=portkey_headers)
+            self._embedding_db_model = EmbeddingSmallOpenAI \
+                if model == 'text-embedding-3-small' \
+                else EmbeddingLargeOpenAI

         return self._embedding_model

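A hedged usage sketch of the refactored property: the constructor call and variable names are assumptions, but embed_query is the standard LangChain OpenAIEmbeddings method:

# First access builds the client; later accesses reuse it. Every request is
# routed through the Portkey gateway with the trace metadata attached.
model_vars = ModelVariables(tenant)  # assumed constructor signature
vector = model_vars.embedding_model.embed_query("What is our refund policy?")
print(len(vector))  # e.g. 1536 for text-embedding-3-small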
     @property
     def llm(self):
         if self._llm is None:
-            self._initialize_llm()
+            portkey_headers = self.get_portkey_headers_for_llm()
+            api_key = self.get_api_key_for_llm()
+            self._llm = ChatOpenAI(api_key=api_key,
+                                   model=self._variables['llm_model'],
+                                   temperature=self._variables['RAG_temperature'],
+                                   base_url=PORTKEY_GATEWAY_URL,
+                                   default_headers=portkey_headers)
         return self._llm

     @property
     def llm_no_rag(self):
         if self._llm_no_rag is None:
-            self._initialize_llm()
+            portkey_headers = self.get_portkey_headers_for_llm()
+            api_key = self.get_api_key_for_llm()
+            self._llm_no_rag = ChatOpenAI(api_key=api_key,
+                                          model=self._variables['llm_model'],
+                                          temperature=self._variables['RAG_temperature'],
+                                          base_url=PORTKEY_GATEWAY_URL,
+                                          default_headers=portkey_headers)
         return self._llm_no_rag

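Both llm and llm_no_rag now follow the same memoize-on-first-access pattern. A generic sketch of the idiom:

class Lazy:
    def __init__(self):
        self._client = None

    @property
    def client(self):
        # Build once on first access, then reuse; repeated access is cheap.
        if self._client is None:
            self._client = object()  # stand-in for an expensive client build
        return self._client

lazy = Lazy()
print(lazy.client is lazy.client)  # True: same cached instance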
-    def _initialize_llm(self):
-        environment = os.getenv('FLASK_ENV', 'development')
-        portkey_metadata = {'tenant_id': str(self.tenant.id), 'environment': environment}
+    def get_portkey_headers_for_llm(self):
+        portkey_metadata = self.get_portkey_metadata()
+        portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'),
+                                        metadata=portkey_metadata,
+                                        provider=self._variables['llm_provider'])
+        return portkey_headers

+    def get_portkey_metadata(self):
+        environment = os.getenv('FLASK_ENV', 'development')
+        portkey_metadata = {'tenant_id': str(self.tenant.id),
+                            'environment': environment,
+                            'trace_id': current_event.trace_id,
+                            'span_id': current_event.span_id,
+                            'span_name': current_event.span_name,
+                            'parent_span_id': current_event.parent_span_id,
+                            }
+        return portkey_metadata

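For illustration, the dictionary returned by get_portkey_metadata would look like this (all values made up); the trace and span fields are what let Portkey gateway logs be correlated with a BusinessEvent trace:

{
    'tenant_id': '42',
    'environment': 'development',
    'trace_id': '0b7f3c9e-...',
    'span_id': '4d2a81f6-...',
    'span_name': 'embed_documents',
    'parent_span_id': '9c5e70aa-...',
}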
+    def get_api_key_for_llm(self):
         if self._variables['llm_provider'] == 'openai':
-            portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'),
-                                            metadata=portkey_metadata,
-                                            provider='openai')
             api_key = os.getenv('OPENAI_API_KEY')
-            self._llm = ChatOpenAI(api_key=api_key,
-                                   model=self._variables['llm_model'],
-                                   temperature=self._variables['RAG_temperature'],
-                                   base_url=PORTKEY_GATEWAY_URL,
-                                   default_headers=portkey_headers)
-            self._llm_no_rag = ChatOpenAI(api_key=api_key,
-                                          model=self._variables['llm_model'],
-                                          temperature=self._variables['no_RAG_temperature'],
-                                          base_url=PORTKEY_GATEWAY_URL,
-                                          default_headers=portkey_headers)
             self._variables['tool_calling_supported'] = self._variables['llm_model'] in ['gpt-4o', 'gpt-4o-mini']
-        elif self._variables['llm_provider'] == 'anthropic':
+        else:  # self._variables['llm_provider'] == 'anthropic'
             api_key = os.getenv('ANTHROPIC_API_KEY')
-            llm_model_ext = os.getenv('ANTHROPIC_LLM_VERSIONS', {}).get(self._variables['llm_model'])
-            self._llm = ChatAnthropic(api_key=api_key,
-                                      model=llm_model_ext,
-                                      temperature=self._variables['RAG_temperature'])
-            self._llm_no_rag = ChatAnthropic(api_key=api_key,
-                                             model=llm_model_ext,
-                                             temperature=self._variables['RAG_temperature'])
             self._variables['tool_calling_supported'] = True
-        else:
-            raise ValueError(f"Invalid chat provider: {self._variables['llm_provider']}")

+        return api_key

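A reduced, runnable sketch of the provider dispatch get_api_key_for_llm performs (environment variable names as in the diff; the helper name is illustrative):

import os

def api_key_for(provider: str):
    # OpenAI and Anthropic keys come from the environment; any other
    # provider is rejected before this point in the refactored code.
    env_var = {'openai': 'OPENAI_API_KEY', 'anthropic': 'ANTHROPIC_API_KEY'}[provider]
    return os.getenv(env_var)

print(api_key_for('openai') is not None)  # True once OPENAI_API_KEY is set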
+    # def _initialize_llm(self):
+    #
+    #
+    #     if self._variables['llm_provider'] == 'openai':
+    #         portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'),
+    #                                         metadata=portkey_metadata,
+    #                                         provider='openai')
+    #
+    #         self._llm = ChatOpenAI(api_key=api_key,
+    #                                model=self._variables['llm_model'],
+    #                                temperature=self._variables['RAG_temperature'],
+    #                                base_url=PORTKEY_GATEWAY_URL,
+    #                                default_headers=portkey_headers)
+    #         self._llm_no_rag = ChatOpenAI(api_key=api_key,
+    #                                       model=self._variables['llm_model'],
+    #                                       temperature=self._variables['no_RAG_temperature'],
+    #                                       base_url=PORTKEY_GATEWAY_URL,
+    #                                       default_headers=portkey_headers)
+    #         self._variables['tool_calling_supported'] = self._variables['llm_model'] in ['gpt-4o', 'gpt-4o-mini']
+    #     elif self._variables['llm_provider'] == 'anthropic':
+    #         api_key = os.getenv('ANTHROPIC_API_KEY')
+    #         llm_model_ext = os.getenv('ANTHROPIC_LLM_VERSIONS', {}).get(self._variables['llm_model'])
+    #         self._llm = ChatAnthropic(api_key=api_key,
+    #                                   model=llm_model_ext,
+    #                                   temperature=self._variables['RAG_temperature'])
+    #         self._llm_no_rag = ChatAnthropic(api_key=api_key,
+    #                                          model=llm_model_ext,
+    #                                          temperature=self._variables['RAG_temperature'])
+    #         self._variables['tool_calling_supported'] = True
+    #     else:
+    #         raise ValueError(f"Invalid chat provider: {self._variables['llm_provider']}")

     @property
     def transcription_client(self):
         if self._transcription_client is None:
-            environment = os.getenv('FLASK_ENV', 'development')
-            portkey_metadata = {'tenant_id': str(self.tenant.id), 'environment': environment}
+            portkey_metadata = self.get_portkey_metadata()
             portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'),
                                             metadata=portkey_metadata,
                                             provider='openai')
             api_key = os.getenv('OPENAI_API_KEY')
             self._transcription_client = OpenAI(api_key=api_key,
                                                 base_url=PORTKEY_GATEWAY_URL,
                                                 default_headers=portkey_headers)
             self._variables['transcription_model'] = 'whisper-1'
         return self._transcription_client

@property
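A hedged usage sketch for the transcription client; model_vars and the file path are illustrative, while audio.transcriptions.create is the standard OpenAI SDK call:

# Transcribe an audio file through the Portkey-wrapped OpenAI client.
client = model_vars.transcription_client  # assumed ModelVariables instance
with open("meeting.mp3", "rb") as audio_file:
    transcript = client.audio.transcriptions.create(
        model=model_vars['transcription_model'],  # 'whisper-1'
        file=audio_file,
    )
print(transcript.text)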