- Add Prometheus and Grafana services in development
- Add Prometheus metrics to the business events. - Ensure asynchronous behaviour of CrewAI specialists. - Adapt business events to work in mixed synchronous/asynchronous contexts. - Extend business events with specialist information. - Start adding a Grafana dashboard (TBC).
This commit is contained in:
@@ -1,3 +1,4 @@
|
||||
import asyncio
|
||||
import json
|
||||
from os import wait
|
||||
from typing import Optional, List
|
||||
@@ -136,8 +137,7 @@ class SpecialistExecutor(CrewAIBaseSpecialistExecutor):
|
||||
"nr_of_questions": self.specialist.configuration.get('nr_of_questions', ''),
|
||||
"identification": arguments.identification,
|
||||
}
|
||||
# crew_results = self.rag_crew.kickoff(inputs=flow_inputs)
|
||||
# current_app.logger.debug(f"Test Crew Output received: {crew_results}")
|
||||
|
||||
flow_results = self.flow.kickoff(inputs=flow_inputs)
|
||||
|
||||
flow_state = self.flow.state
|
||||
@@ -214,10 +214,10 @@ class SPINFlow(EveAICrewAIFlow[SPINFlowState]):
|
||||
return ""
|
||||
|
||||
@listen(process_inputs)
|
||||
def execute_rag(self):
|
||||
async def execute_rag(self):
|
||||
inputs = self.state.input.model_dump()
|
||||
try:
|
||||
crew_output = self.rag_crew.kickoff(inputs=inputs)
|
||||
crew_output = await self.rag_crew.kickoff_async(inputs=inputs)
|
||||
self.specialist_executor.log_tuning("RAG Crew Output", crew_output.model_dump())
|
||||
output_pydantic = crew_output.pydantic
|
||||
if not output_pydantic:
|
||||
@@ -231,10 +231,11 @@ class SPINFlow(EveAICrewAIFlow[SPINFlowState]):
|
||||
raise e
|
||||
|
||||
@listen(process_inputs)
|
||||
def execute_spin(self):
|
||||
async def execute_spin(self):
|
||||
inputs = self.state.input.model_dump()
|
||||
try:
|
||||
crew_output = self.spin_crew.kickoff(inputs=inputs)
|
||||
crew_output = await self.spin_crew.kickoff_async(inputs=inputs)
|
||||
current_app.logger.info(f"SPIN Crew Executed, output: {crew_output.model_dump()}")
|
||||
self.specialist_executor.log_tuning("Spin Crew Output", crew_output.model_dump())
|
||||
output_pydantic = crew_output.pydantic
|
||||
if not output_pydantic:
|
||||
@@ -248,10 +249,10 @@ class SPINFlow(EveAICrewAIFlow[SPINFlowState]):
|
||||
raise e
|
||||
|
||||
@listen(process_inputs)
|
||||
def execute_identification(self):
|
||||
async def execute_identification(self):
|
||||
inputs = self.state.input.model_dump()
|
||||
try:
|
||||
crew_output = self.identification_crew.kickoff(inputs=inputs)
|
||||
crew_output = await self.identification_crew.kickoff_async(inputs=inputs)
|
||||
self.specialist_executor.log_tuning("Identification Crew Output", crew_output.model_dump())
|
||||
output_pydantic = crew_output.pydantic
|
||||
if not output_pydantic:
|
||||
@@ -265,7 +266,7 @@ class SPINFlow(EveAICrewAIFlow[SPINFlowState]):
|
||||
raise e
|
||||
|
||||
@listen(and_(execute_rag, execute_spin, execute_identification))
|
||||
def consolidate(self):
|
||||
async def consolidate(self):
|
||||
inputs = self.state.input.model_dump()
|
||||
if self.state.rag_output:
|
||||
inputs["prepared_answers"] = self.state.rag_output.answer
|
||||
@@ -277,7 +278,7 @@ class SPINFlow(EveAICrewAIFlow[SPINFlowState]):
|
||||
current_app.logger.debug(f"Additional Questions: {additional_questions}")
|
||||
inputs["additional_questions"] = additional_questions
|
||||
try:
|
||||
crew_output = self.rag_consolidation_crew.kickoff(inputs=inputs)
|
||||
crew_output = await self.rag_consolidation_crew.kickoff_async(inputs=inputs)
|
||||
self.specialist_executor.log_tuning("RAG Consolidation Crew Output", crew_output.model_dump())
|
||||
output_pydantic = crew_output.pydantic
|
||||
if not output_pydantic:
|
||||
@@ -290,13 +291,16 @@ class SPINFlow(EveAICrewAIFlow[SPINFlowState]):
|
||||
self.exception_raised = True
|
||||
raise e
|
||||
|
||||
def kickoff(self, inputs=None):
|
||||
with current_event.create_span("SPIN Specialist Execution"):
|
||||
async def execute_async(self, inputs=None):
|
||||
current_app.logger.debug(f"Async kickoff {self.name}")
|
||||
async with current_event.create_span_async("SPIN Specialist Execution"):
|
||||
self.specialist_executor.log_tuning("Inputs retrieved", inputs)
|
||||
self.state.input = SPINSpecialistInput.model_validate(inputs)
|
||||
self.specialist.update_progress("EveAI Flow Start", {"name": "SPIN"})
|
||||
try:
|
||||
result = super().kickoff()
|
||||
current_app.logger.debug(f"Async super kickoff {self.name}")
|
||||
result = await super().kickoff_async()
|
||||
current_app.logger.debug(f"Async super kickoff {self.name} ended")
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"Error kicking of Flow: {str(e)}")
|
||||
|
||||
|
||||
Reference in New Issue
Block a user