From 883175b8f5190cfb838df3c0b55291ef666ebffc Mon Sep 17 00:00:00 2001 From: Josako Date: Tue, 1 Oct 2024 08:01:59 +0200 Subject: [PATCH] - Portkey log retrieval started - flower container added (dev and prod) --- common/utils/model_utils.py | 21 ++++++-- common/utils/portkey_utils.py | 99 ++++++++++++++++++++++++++++++++++ docker/build_and_push_eveai.sh | 4 +- docker/compose_dev.yaml | 18 +++++++ docker/compose_stackhero.yaml | 13 ++++- docker/flower/Dockerfile | 34 ++++++++++++ nginx/nginx.conf | 3 +- requirements.txt | 2 +- scripts/start_flower.sh | 33 +++++++++--- 9 files changed, 211 insertions(+), 16 deletions(-) create mode 100644 common/utils/portkey_utils.py create mode 100644 docker/flower/Dockerfile mode change 100755 => 100644 scripts/start_flower.sh diff --git a/common/utils/model_utils.py b/common/utils/model_utils.py index 2d27f4f..cd391e2 100644 --- a/common/utils/model_utils.py +++ b/common/utils/model_utils.py @@ -97,7 +97,12 @@ class ModelVariables(MutableMapping): portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'), provider=self._variables['embedding_provider'], - metadata=portkey_metadata) + metadata=portkey_metadata, + trace_id=current_event.trace_id, + span_id=current_event.span_id, + span_name=current_event.span_name, + parent_span_id=current_event.parent_span_id + ) api_key = os.getenv('OPENAI_API_KEY') model = self._variables['embedding_model'] self._embedding_model = OpenAIEmbeddings(api_key=api_key, @@ -136,7 +141,12 @@ class ModelVariables(MutableMapping): portkey_metadata = self.get_portkey_metadata() portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'), metadata=portkey_metadata, - provider=self._variables['llm_provider']) + provider=self._variables['llm_provider'], + trace_id=current_event.trace_id, + span_id=current_event.span_id, + span_name=current_event.span_name, + parent_span_id=current_event.parent_span_id + ) return portkey_headers def get_portkey_metadata(self): @@ -196,7 +206,12 @@ class 
ModelVariables(MutableMapping): portkey_metadata = self.get_portkey_metadata() portkey_headers = createHeaders(api_key=os.getenv('PORTKEY_API_KEY'), metadata=portkey_metadata, - provider='openai') + provider='openai', + trace_id=current_event.trace_id, + span_id=current_event.span_id, + span_name=current_event.span_name, + parent_span_id=current_event.parent_span_id + ) api_key = os.getenv('OPENAI_API_KEY') self._transcription_client = OpenAI(api_key=api_key, base_url=PORTKEY_GATEWAY_URL, diff --git a/common/utils/portkey_utils.py b/common/utils/portkey_utils.py new file mode 100644 index 0000000..02e4758 --- /dev/null +++ b/common/utils/portkey_utils.py @@ -0,0 +1,99 @@ +import requests +import json +from typing import Optional + +# Define a function to make the GET request +def get_metadata_grouped_data( + api_key: str, + metadata_key: str, + time_of_generation_min: Optional[str] = None, + time_of_generation_max: Optional[str] = None, + total_units_min: Optional[int] = None, + total_units_max: Optional[int] = None, + cost_min: Optional[float] = None, + cost_max: Optional[float] = None, + prompt_token_min: Optional[int] = None, + prompt_token_max: Optional[int] = None, + completion_token_min: Optional[int] = None, + completion_token_max: Optional[int] = None, + status_code: Optional[str] = None, + weighted_feedback_min: Optional[float] = None, + weighted_feedback_max: Optional[float] = None, + virtual_keys: Optional[str] = None, + configs: Optional[str] = None, + workspace_slug: Optional[str] = None, + api_key_ids: Optional[str] = None, + current_page: Optional[int] = 1, + page_size: Optional[int] = 20, + metadata: Optional[str] = None, + ai_org_model: Optional[str] = None, + trace_id: Optional[str] = None, + span_id: Optional[str] = None, +): + url = f"https://api.portkey.ai/v1/analytics/groups/metadata/{metadata_key}" + + # Set up query parameters + params = { + "time_of_generation_min": time_of_generation_min, + "time_of_generation_max": time_of_generation_max, 
+ "total_units_min": total_units_min, + "total_units_max": total_units_max, + "cost_min": cost_min, + "cost_max": cost_max, + "prompt_token_min": prompt_token_min, + "prompt_token_max": prompt_token_max, + "completion_token_min": completion_token_min, + "completion_token_max": completion_token_max, + "status_code": status_code, + "weighted_feedback_min": weighted_feedback_min, + "weighted_feedback_max": weighted_feedback_max, + "virtual_keys": virtual_keys, + "configs": configs, + "workspace_slug": workspace_slug, + "api_key_ids": api_key_ids, + "current_page": current_page, + "page_size": page_size, + "metadata": metadata, + "ai_org_model": ai_org_model, + "trace_id": trace_id, + "span_id": span_id, + } + + # Remove any keys with None values + params = {k: v for k, v in params.items() if v is not None} + + # Set up the headers + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json" + } + + # Make the GET request + response = requests.get(url, headers=headers, params=params) + + # Check for successful response + if response.status_code == 200: + return response.json() # Return JSON data + else: + response.raise_for_status() # Raise an exception for errors + +# Example usage +# Replace 'your_api_key' and 'your_metadata_key' with actual values +api_key = 'your_api_key' +metadata_key = 'your_metadata_key' + +try: + data = get_metadata_grouped_data( + api_key=api_key, + metadata_key=metadata_key, + time_of_generation_min="2024-08-23T15:50:23+05:30", + time_of_generation_max="2024-09-23T15:50:23+05:30", + total_units_min=100, + total_units_max=1000, + cost_min=10, + cost_max=100, + status_code="200,201" + ) + print(json.dumps(data, indent=4)) +except Exception as e: + print(f"Error occurred: {str(e)}") \ No newline at end of file diff --git a/docker/build_and_push_eveai.sh b/docker/build_and_push_eveai.sh index fd49adc..b0880c9 100755 --- a/docker/build_and_push_eveai.sh +++ b/docker/build_and_push_eveai.sh @@ -141,7 +141,7 @@ if [ $# 
-eq 0 ]; then SERVICES=() while IFS= read -r line; do SERVICES+=("$line") - done < <(yq e '.services | keys | .[]' compose_dev.yaml | grep -E '^(nginx|eveai_)') + done < <(yq e '.services | keys | .[]' compose_dev.yaml | grep -E '^(nginx|eveai_|flower)') else SERVICES=("$@") fi @@ -158,7 +158,7 @@ docker buildx use eveai_builder # Loop through services for SERVICE in "${SERVICES[@]}"; do - if [[ "$SERVICE" == "nginx" || "$SERVICE" == eveai_* ]]; then + if [[ "$SERVICE" == "nginx" || "$SERVICE" == eveai_* || "$SERVICE" == "flower" ]]; then if process_service "$SERVICE"; then echo "Successfully processed $SERVICE" else diff --git a/docker/compose_dev.yaml b/docker/compose_dev.yaml index 1c60433..d0d3c64 100644 --- a/docker/compose_dev.yaml +++ b/docker/compose_dev.yaml @@ -22,6 +22,8 @@ x-common-variables: &common-variables MAIL_PASSWORD: '$$6xsWGbNtx$$CFMQZqc*' MAIL_SERVER: mail.flow-it.net MAIL_PORT: 465 + REDIS_URL: redis + REDIS_PORT: '6379' OPENAI_API_KEY: 'sk-proj-8R0jWzwjL7PeoPyMhJTZT3BlbkFJLb6HfRB2Hr9cEVFWEhU7' GROQ_API_KEY: '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' ANTHROPIC_API_KEY: 'sk-ant-api03-c2TmkzbReeGhXBO5JxNH6BJNylRDonc9GmZd0eRbrvyekec2' @@ -265,6 +267,22 @@ services: networks: - eveai-network + flower: + image: josakola/flower:latest + build: + context: .. 
+ dockerfile: ./docker/flower/Dockerfile + environment: + <<: *common-variables + volumes: + - ../scripts:/app/scripts + ports: + - "5555:5555" + depends_on: + - redis + networks: + - eveai-network + minio: image: minio/minio ports: diff --git a/docker/compose_stackhero.yaml b/docker/compose_stackhero.yaml index 6e5020c..e72ecea 100644 --- a/docker/compose_stackhero.yaml +++ b/docker/compose_stackhero.yaml @@ -21,11 +21,13 @@ x-common-variables: &common-variables MAIL_USERNAME: 'evie_admin@askeveai.com' MAIL_PASSWORD: 's5D%R#y^v!s&6Z^i0k&' MAIL_SERVER: mail.askeveai.com - MAIL_PORT: 465 + MAIL_PORT: '465' REDIS_USER: eveai REDIS_PASS: 'jHliZwGD36sONgbm0fc6SOpzLbknqq4RNF8K' REDIS_URL: 8bciqc.stackhero-network.com REDIS_PORT: '9961' + FLOWER_USER: 'Felucia' + FLOWER_PASSWORD: 'Jungles' OPENAI_API_KEY: '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' GROQ_API_KEY: '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' ANTHROPIC_API_KEY: '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' @@ -143,6 +145,15 @@ services: networks: - eveai-network + flower: + image: josakola/flower:latest + environment: + <<: *common-variables + ports: + - "5555:5555" + networks: + - eveai-network + volumes: eveai_logs: diff --git a/docker/flower/Dockerfile b/docker/flower/Dockerfile new file mode 100644 index 0000000..e779d6b --- /dev/null +++ b/docker/flower/Dockerfile @@ -0,0 +1,34 @@ +ARG PYTHON_VERSION=3.12.3 +FROM python:${PYTHON_VERSION}-slim as base + +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 + +WORKDIR /app + +ARG UID=10001 +RUN adduser \ + --disabled-password \ + --gecos "" \ + --home "/nonexistent" \ + --shell "/bin/bash" \ + --no-create-home \ + --uid "${UID}" \ + appuser + +RUN apt-get update && apt-get install -y \ + build-essential \ + gcc \ + && apt-get clean \ + && rm -rf 
/var/lib/apt/lists/* + +COPY requirements.txt /app/ +RUN pip install --no-cache-dir -r requirements.txt + +COPY . /app +COPY scripts/start_flower.sh /app/start_flower.sh +RUN chmod a+x /app/start_flower.sh + +USER appuser + +CMD ["/app/start_flower.sh"] diff --git a/nginx/nginx.conf b/nginx/nginx.conf index ae519a9..f3fb9f5 100644 --- a/nginx/nginx.conf +++ b/nginx/nginx.conf @@ -159,13 +159,12 @@ http { } location /flower/ { - proxy_pass http://127.0.0.1:5555/; + proxy_pass http://flower:5555/flower/; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; } - } include sites-enabled/*; diff --git a/requirements.txt b/requirements.txt index a03ad4d..d1fe15c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -79,4 +79,4 @@ flask-healthz~=1.0.1 langsmith~=0.1.121 anthropic~=0.34.2 prometheus-client~=0.20.0 - +flower~=2.0.1 diff --git a/scripts/start_flower.sh b/scripts/start_flower.sh old mode 100755 new mode 100644 index 34cd793..9a211f5 --- a/scripts/start_flower.sh +++ b/scripts/start_flower.sh @@ -1,9 +1,28 @@ -#!/usr/bin/env bash +#!/bin/bash +set -e -cd "/Volumes/OWC4M2_1/Dropbox/Josako's Dev/Josako/EveAI/Development/eveAI/" || exit 1 -source "/Volumes/OWC4M2_1/Dropbox/Josako's Dev/Josako/EveAI/Development/eveAI/.venv/bin/activate" +# scripts/start_flower.sh -# on development machine, no authentication required -export FLOWER_UNAUTHENTICATED_API=True -# Start a worker for the 'embeddings' queue with higher concurrency -celery -A eveai_workers.celery flower \ No newline at end of file +# Set default values +REDIS_HOST=${REDIS_URL:-redis} +REDIS_PORT=${REDIS_PORT:-6379} + +# Set environment-specific variables +if [ "$FLASK_ENV" = "production" ]; then + # Production settings + export FLOWER_BASIC_AUTH="${FLOWER_USER}:${FLOWER_PASSWORD}" + export FLOWER_BROKER_URL="redis://${REDIS_USER}:${REDIS_PASS}@${REDIS_URL}:${REDIS_PORT}/0" + export 
CELERY_BROKER_URL="redis://${REDIS_USER}:${REDIS_PASS}@${REDIS_URL}:${REDIS_PORT}/0" +else + # Development settings + export FLOWER_BROKER_URL="redis://${REDIS_HOST}:${REDIS_PORT}/0" + export CELERY_BROKER_URL="redis://${REDIS_HOST}:${REDIS_PORT}/0" +fi + +echo "Flower broker host: ${REDIS_HOST}:${REDIS_PORT}" +echo "----------" + +# Start Flower +exec celery flower \ + --url-prefix=/flower \ + --port=5555