- Optimisation and streamlining of messages in ExecutionProgressTracker (EPT)
- Adaptation of ProgressTracker to handle these optimised messages
- Hardening of SSE streaming in eveai_chat_client
This commit is contained in:
@@ -10,6 +10,13 @@ import time
|
||||
class ExecutionProgressTracker:
|
||||
"""Tracks progress of specialist executions using Redis"""
|
||||
|
||||
# Canonical terminal processing types emitted to SSE clients.
PT_COMPLETE = 'EVEAI_COMPLETE'
PT_ERROR = 'EVEAI_ERROR'

# Legacy spellings still produced by older publishers; mapped onto the
# canonical PT_* constants by _normalize_processing_type().
_COMPLETE_ALIASES = {'EveAI Specialist Complete', 'Task Complete', 'task complete'}
_ERROR_ALIASES = {'EveAI Specialist Error', 'Task Error', 'task error'}
|
||||
|
||||
def __init__(self):
|
||||
try:
|
||||
# Use shared pubsub pool (lazy connect; no eager ping)
|
||||
@@ -40,6 +47,16 @@ class ExecutionProgressTracker:
|
||||
# Exhausted retries
|
||||
raise last_exc
|
||||
|
||||
def _normalize_processing_type(self, processing_type: str) -> str:
    """Map legacy processing-type spellings onto the canonical PT_* constants.

    Falsy input (None or empty string) is returned unchanged. Any value not
    found in the alias sets passes through with surrounding whitespace
    stripped.
    """
    # Guard clause: nothing to normalise for falsy input.
    if not processing_type:
        return processing_type
    trimmed = str(processing_type).strip()
    # Translate known legacy spellings to their canonical constant.
    for aliases, canonical in (
        (self._COMPLETE_ALIASES, self.PT_COMPLETE),
        (self._ERROR_ALIASES, self.PT_ERROR),
    ):
        if trimmed in aliases:
            return canonical
    return trimmed
|
||||
|
||||
def send_update(self, ctask_id: str, processing_type: str, data: dict):
|
||||
"""Send an update about execution progress"""
|
||||
try:
|
||||
@@ -47,7 +64,7 @@ class ExecutionProgressTracker:
|
||||
f"{data}")
|
||||
key = self._get_key(ctask_id)
|
||||
|
||||
|
||||
processing_type = self._normalize_processing_type(processing_type)
|
||||
update = {
|
||||
'processing_type': processing_type,
|
||||
'data': data,
|
||||
@@ -96,14 +113,16 @@ class ExecutionProgressTracker:
|
||||
self._retry(lambda: pubsub.subscribe(key))
|
||||
|
||||
try:
|
||||
# Hint client reconnect interval (optional but helpful)
|
||||
yield "retry: 3000\n\n"
|
||||
|
||||
# First yield any existing updates
|
||||
length = self._retry(lambda: self.redis.llen(key))
|
||||
if length > 0:
|
||||
updates = self._retry(lambda: self.redis.lrange(key, 0, -1))
|
||||
for update in updates:
|
||||
update_data = json.loads(update.decode('utf-8'))
|
||||
# Use processing_type for the event
|
||||
yield f"event: {update_data['processing_type']}\n"
|
||||
update_data['processing_type'] = self._normalize_processing_type(update_data.get('processing_type'))
|
||||
yield f"data: {json.dumps(update_data)}\n\n"
|
||||
|
||||
# Then listen for new updates
|
||||
@@ -121,13 +140,20 @@ class ExecutionProgressTracker:
|
||||
|
||||
if message['type'] == 'message': # This is Redis pub/sub type
|
||||
update_data = json.loads(message['data'].decode('utf-8'))
|
||||
yield f"data: {message['data'].decode('utf-8')}\n\n"
|
||||
update_data['processing_type'] = self._normalize_processing_type(update_data.get('processing_type'))
|
||||
yield f"data: {json.dumps(update_data)}\n\n"
|
||||
|
||||
# Check processing_type for completion
|
||||
if update_data['processing_type'] in ['Task Complete', 'Task Error', 'EveAI Specialist Complete']:
|
||||
# Unified completion check
|
||||
if update_data['processing_type'] in [self.PT_COMPLETE, self.PT_ERROR]:
|
||||
# Give proxies/clients a chance to flush
|
||||
yield ": closing\n\n"
|
||||
break
|
||||
finally:
|
||||
try:
|
||||
pubsub.unsubscribe()
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
pubsub.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
Reference in New Issue
Block a user