- Adding usage metrics to specialist execution
- Correcting the implementation of usage reporting
- Removed some obsolete debug statements
@@ -198,7 +198,13 @@ class SPINFlowState(EveAIFlowState):
 
 class SPINFlow(EveAICrewAIFlow[SPINFlowState]):
 
-    def __init__(self, specialist_executor, rag_crew, spin_crew, identification_crew, rag_consolidation_crew, **kwargs):
+    def __init__(self,
+                 specialist_executor: CrewAIBaseSpecialistExecutor,
+                 rag_crew: EveAICrewAICrew,
+                 spin_crew: EveAICrewAICrew,
+                 identification_crew: EveAICrewAICrew,
+                 rag_consolidation_crew: EveAICrewAICrew,
+                 **kwargs):
         super().__init__(specialist_executor, "SPIN Specialist Flow", **kwargs)
         self.specialist_executor = specialist_executor
         self.rag_crew = rag_crew
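Aside from the added type hints, nothing about construction changes here: callers still pass the executor plus four crews. For illustration only, a tiny self-contained sketch of the same keyword-style, typed-dependency constructor pattern; the class names below are invented stand-ins, not the project's real CrewAIBaseSpecialistExecutor or EveAICrewAICrew.

    # Illustration only: stand-in classes so the call shape of a typed __init__ is runnable.
    class Executor:          # stand-in for the specialist executor dependency
        pass


    class Crew:              # stand-in for a crew dependency
        pass


    class ExampleFlow:
        def __init__(self,
                     specialist_executor: Executor,
                     rag_crew: Crew,
                     spin_crew: Crew,
                     **kwargs):
            # Keep an explicit reference to each injected dependency, as the diff above does.
            self.specialist_executor = specialist_executor
            self.rag_crew = rag_crew
            self.spin_crew = spin_crew


    flow = ExampleFlow(specialist_executor=Executor(), rag_crew=Crew(), spin_crew=Crew())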
@@ -1,12 +1,16 @@
 import json
+import time
 
 from crewai import Agent, Task, Crew, Flow
 from crewai.agents.parser import AgentAction, AgentFinish
+from crewai.crews import CrewOutput
 from crewai.tools import BaseTool
 from flask import current_app
 from pydantic import BaseModel, create_model, Field, ConfigDict
 from typing import Dict, Type, get_type_hints, Optional, List, Any, Callable
+
+from common.utils.business_event_context import current_event
 
 
 class EveAICrewAIAgent(Agent):
     specialist: Any = Field(default=None, exclude=True)
@@ -36,26 +40,27 @@ class EveAICrewAIAgent(Agent):
         Returns:
             Output of the agent
         """
-        self.specialist.log_tuning("EveAI Agent Task Start",
-                                   {"name": self.name,
-                                    'task': task.name,
-                                    })
-        self.specialist.update_progress("EveAI Agent Task Start",
-                                        {"name": self.name,
-                                         'task': task.name,
-                                         })
+        with current_event.create_span(f"Task Execution {task.name} by {self.name}"):
+            self.specialist.log_tuning("EveAI Agent Task Start",
+                                       {"name": self.name,
+                                        'task': task.name,
+                                        })
+            self.specialist.update_progress("EveAI Agent Task Start",
+                                            {"name": self.name,
+                                             'task': task.name,
+                                             })
 
-        result = super().execute_task(task, context, tools)
+            result = super().execute_task(task, context, tools)
 
-        self.specialist.log_tuning("EveAI Agent Task Complete",
-                                   {"name": self.name,
-                                    'task': task.name,
-                                    'result': result,
-                                    })
-        self.specialist.update_progress("EveAI Agent Task Complete",
-                                        {"name": self.name,
-                                         'task': task.name,
-                                         })
+            self.specialist.log_tuning("EveAI Agent Task Complete",
+                                       {"name": self.name,
+                                        'task': task.name,
+                                        'result': result,
+                                        })
+            self.specialist.update_progress("EveAI Agent Task Complete",
+                                            {"name": self.name,
+                                             'task': task.name,
+                                             })
 
         return result
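The only behavioural change in this hunk is that the existing tuning/progress logging and the task execution itself now run inside a span, so their timing and output are attributed to a single traced unit per task. As a rough, self-contained sketch of what such a span helper typically looks like (assuming an interface similar to current_event.create_span; this is not the project's actual implementation):

    import time
    from contextlib import contextmanager


    @contextmanager
    def create_span(name: str):
        """Minimal stand-in for an event span: time the wrapped block and report
        the duration when it exits, even if the block raises."""
        start = time.time()
        try:
            yield
        finally:
            print(f"span '{name}' finished in {time.time() - start:.3f}s")


    # Usage mirrors the pattern in the hunk above: everything the task does happens
    # inside the span, so start/complete logs and the LLM call share one trace scope.
    with create_span("Task Execution example_task by example_agent"):
        result = "run the task here"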
@@ -75,26 +80,6 @@ class EveAICrewAITask(Task):
         self.specialist.update_progress("EveAI Task Initialisation", {"name": name})
 
 
-# def create_task_callback(task: EveAICrewAITask):
-#     def task_callback(output):
-#         # Todo Check if required with new version of crewai
-#         if isinstance(output, BaseModel):
-#             task.specialist.log_tuning(f"TASK CALLBACK: EveAICrewAITask {task.name} Output:",
-#                                        {'output': output.model_dump()})
-#         if output.output_format == "pydantic" and not output.pydantic:
-#             try:
-#                 raw_json = json.loads(output.raw)
-#                 output_pydantic = task.output_pydantic(**raw_json)
-#                 output.pydantic = output_pydantic
-#                 task.specialist.log_tuning(f"TASK CALLBACK: EveAICrewAITask {task.name} Converted Output",
-#                                            {'output': output_pydantic.model_dump()})
-#             except Exception as e:
-#                 task.specialist.log_tuning(f"TASK CALLBACK: EveAICrewAITask {task.name} Output Conversion Error: "
-#                                            f"{str(e)}", {})
-#
-#     return task_callback
-
-
 class EveAICrewAICrew(Crew):
     specialist: Any = Field(default=None, exclude=True)
     name: str = Field(default=None, exclude=True)
@@ -107,6 +92,24 @@ class EveAICrewAICrew(Crew):
         self.specialist.log_tuning("Initializing EveAICrewAICrew", {"name": self.name})
         self.specialist.update_progress("EveAI Crew Initialisation", {"name": self.name})
 
+    def kickoff(
+            self,
+            inputs: Optional[Dict[str, Any]] = None,
+    ) -> CrewOutput:
+        with current_event.create_span(f"Crew {self.name} kickoff"):
+            start_time = time.time()
+            results = super().kickoff(inputs)
+            end_time = time.time()
+            metrics = {
+                "total_tokens": self.usage_metrics.total_tokens,
+                "prompt_tokens": self.usage_metrics.prompt_tokens,
+                "completion_tokens": self.usage_metrics.completion_tokens,
+                "time_elapsed": end_time - start_time,
+                "interaction_type": "Crew Execution"
+            }
+            current_event.log_llm_metrics(metrics)
+
+            return results
 
 
 class EveAICrewAIFlow(Flow):
     specialist: Any = Field(default=None, exclude=True)
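This kickoff override is where usage actually gets recorded: the crew run is timed and the crew's token counters are packaged into a single metrics record passed to current_event.log_llm_metrics. Below is a small self-contained sketch of that bookkeeping with a dummy usage object standing in for self.usage_metrics; the metric keys mirror the diff, everything else is an assumption.

    import time
    from dataclasses import dataclass


    @dataclass
    class UsageMetrics:
        """Stand-in for the crew's usage_metrics object (token counters)."""
        total_tokens: int = 0
        prompt_tokens: int = 0
        completion_tokens: int = 0


    def run_with_metrics(run, usage: UsageMetrics, interaction_type: str = "Crew Execution"):
        """Time a callable and package its token usage the same way the override above does."""
        start_time = time.time()
        result = run()
        metrics = {
            "total_tokens": usage.total_tokens,
            "prompt_tokens": usage.prompt_tokens,
            "completion_tokens": usage.completion_tokens,
            "time_elapsed": time.time() - start_time,
            "interaction_type": interaction_type,
        }
        return result, metrics


    output, metrics = run_with_metrics(lambda: "crew output", UsageMetrics(120, 80, 40))
    print(metrics)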
@@ -244,11 +244,6 @@ def execute_specialist(self, tenant_id: int, specialist_id: int, arguments: Dict
             session_id,
             create_params={'timezone': user_timezone}
         )
-        if cached_session:
-            current_app.logger.debug(f"Cached Session successfully retrieved for {session_id}: {cached_session.id}")
-        else:
-            current_app.logger.debug(f"No Cached Session retrieved for {session_id}")
-
 
         # Get specialist from database
         specialist = Specialist.query.get_or_404(specialist_id)