Enable model variables & start working on RAG task
@@ -1,5 +1,6 @@
 from os import environ, path
 from datetime import timedelta
+import redis
 
 basedir = path.abspath(path.dirname(__file__))
 
@@ -42,7 +43,7 @@ class Config(object):
 
     # supported LLMs
     SUPPORTED_EMBEDDINGS = ['openai.text-embedding-3-small', 'mistral.mistral-embed']
-    SUPPORTED_LLMS = ['openai.gpt-4-turbo', 'openai.gpt-3.5-turbo', 'mistral.mistral-large-2402']
+    SUPPORTED_LLMS = ['openai.gpt-4o', 'openai.gpt-4-turbo', 'openai.gpt-3.5-turbo', 'mistral.mistral-large-2402']
 
     # Celery settings
     CELERY_TASK_SERIALIZER = 'json'
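Each entry in the supported-model lists is a 'provider.model' identifier. A minimal sketch of how a configured model could be validated and split against SUPPORTED_LLMS (the resolve_llm helper is an assumption for illustration, not part of this commit):

# Hypothetical helper: validate a configured identifier against
# SUPPORTED_LLMS and split it into (provider, model).
SUPPORTED_LLMS = ['openai.gpt-4o', 'openai.gpt-4-turbo',
                  'openai.gpt-3.5-turbo', 'mistral.mistral-large-2402']

def resolve_llm(identifier: str) -> tuple[str, str]:
    if identifier not in SUPPORTED_LLMS:
        raise ValueError(f'Unsupported LLM: {identifier!r}')
    provider, _, model = identifier.partition('.')  # split on the first dot only
    return provider, model

assert resolve_llm('openai.gpt-4o') == ('openai', 'gpt-4o')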
@@ -62,6 +63,20 @@ class Config(object):
     GPT3_5_SUMMARY_TEMPLATE = """Write a concise summary of the text in the same language as the provided text.
     Text is delimited between triple backquotes.
     ```{text}```"""
+    GPT4_RAG_TEMPLATE = """Answer the question based on the following context, both delimited between triple backquotes,
+    in the same language as the question.
+    If the question cannot be answered using the text, say "I don't know" in the same language as the question.
+    Context:
+    ```{context}```
+    Question:
+    ```{question}```"""
+    GPT3_5_RAG_TEMPLATE = """Answer the question based on the following context, both delimited between triple backquotes,
+    in the same language as the question.
+    If the question cannot be answered using the text, say "I don't know" in the same language as the question.
+    Context:
+    ```{context}```
+    Question:
+    ```{question}```"""
 
     # SocketIO settings
     # SOCKETIO_ASYNC_MODE = 'threading'
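The new RAG templates are plain str.format templates with {context} and {question} placeholders, mirroring the existing summary template's {text}. A usage sketch (the template is abbreviated and the context/question values are invented for illustration):

# Filling a RAG prompt; abbreviated template, illustrative values.
RAG_TEMPLATE = ('Answer the question based on the following context, both '
                'delimited between triple backquotes, in the same language as the question.\n'
                'Context:\n```{context}```\nQuestion:\n```{question}```')

prompt = RAG_TEMPLATE.format(
    context='Uploaded documents are chunked and embedded for retrieval.',
    question='How are uploaded documents prepared for retrieval?',
)
print(prompt)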
@@ -91,8 +106,13 @@ class DevConfig(Config):
     UPLOAD_FOLDER = '/Volumes/OWC4M2_1/Development/eveAI/file_store'
 
     # Celery settings
-    CELERY_BROKER_URL = 'redis://localhost:6379/0'  # Default Redis configuration
+    # eveai_app Redis Settings
+    CELERY_BROKER_URL = 'redis://localhost:6379/0'
+    CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
+    # eveai_chat Redis Settings
+    CELERY_BROKER_URL_CHAT = 'redis://localhost:6379/3'
+    CELERY_RESULT_BACKEND_CHAT = 'redis://localhost:6379/3'
 
 
     # OpenAI API Keys
     OPENAI_API_KEY = 'sk-proj-8R0jWzwjL7PeoPyMhJTZT3BlbkFJLb6HfRB2Hr9cEVFWEhU7'
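DevConfig now defines two broker/result-backend pairs: Redis db 0 for the main app workers and db 3 for the chat workers. A sketch of how two separate Celery apps could be pointed at them (the app names and import path are assumptions):

# Hypothetical wiring of the two worker pools; names are illustrative.
from celery import Celery
from config import DevConfig

app_celery = Celery('eveai_app',
                    broker=DevConfig.CELERY_BROKER_URL,
                    backend=DevConfig.CELERY_RESULT_BACKEND)
chat_celery = Celery('eveai_chat',
                     broker=DevConfig.CELERY_BROKER_URL_CHAT,
                     backend=DevConfig.CELERY_RESULT_BACKEND_CHAT)

Keeping the chat queue on a separate Redis database isolates its messages and results from the main app's.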
@@ -118,12 +138,8 @@ class DevConfig(Config):
     JWT_SECRET_KEY = 'bsdMkmQ8ObfMD52yAFg4trrvjgjMhuIqg2fjDpD/JqvgY0ccCcmlsEnVFmR79WPiLKEA3i8a5zmejwLZKl4v9Q=='
 
     # Session settings
-    SESSION_REDIS = {
-        'host': 'localhost',  # Redis server hostname or IP address
-        'port': 6379,  # Redis server port
-        'db': 2,  # Redis database number (optional)
-        'password': None  # Redis password (optional)
-    }
+    SESSION_REDIS = redis.from_url('redis://localhost:6379/2')
 
 
 class ProdConfig(Config):
     DEVELOPMENT = False
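SESSION_REDIS changes from a plain settings dict to an actual Redis client, which is the object Flask-Session expects for this key; redis.from_url builds the same connection the old dict described (localhost:6379, db 2, no password):

import redis

# The two constructions are equivalent; from_url packs host/port/db into one URL.
client_from_url = redis.from_url('redis://localhost:6379/2')
client_explicit = redis.Redis(host='localhost', port=6379, db=2, password=None)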
@@ -26,6 +26,14 @@ LOGGING = {
             'backupCount': 10,
             'formatter': 'standard',
         },
+        'file_chat_workers': {
+            'level': 'DEBUG',
+            'class': 'logging.handlers.RotatingFileHandler',
+            'filename': 'logs/eveai_chat_workers.log',
+            'maxBytes': 1024*1024*5,  # 5MB
+            'backupCount': 10,
+            'formatter': 'standard',
+        },
         'console': {
             'class': 'logging.StreamHandler',
             'level': 'DEBUG',
@@ -53,6 +61,11 @@ LOGGING = {
             'level': 'DEBUG',
             'propagate': False
         },
+        'eveai_chat_workers': {  # logger for the eveai_chat_workers
+            'handlers': ['file_chat_workers', 'console'],
+            'level': 'DEBUG',
+            'propagate': False
+        },
         '': {  # root logger
             'handlers': ['console'],
             'level': 'WARNING',  # Set higher level for root to minimize noise
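The second file adds a rotating-file handler and a matching logger so chat-worker output lands in logs/eveai_chat_workers.log instead of only the console. A trimmed, self-contained sketch of the dictConfig pattern in use (the handler set is reduced for brevity):

import logging
import logging.config

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {'class': 'logging.StreamHandler', 'level': 'DEBUG'},
    },
    'loggers': {
        'eveai_chat_workers': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,  # keep worker logs out of the root logger
        },
    },
}

logging.config.dictConfig(LOGGING)
logging.getLogger('eveai_chat_workers').debug('chat worker logging configured')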