- License Usage Calculation implemented

- View License Usages
- Celery Beat container added
- First schedule in Celery Beat for calculating usage (hourly); see the configuration sketch below
- repopack can now split its output per component
- Various fixes as a consequence of changing file_location / file_name ==> bucket_name / object_name
- Celery Routing / Queuing updated
Josako
2024-10-11 16:33:36 +02:00
parent 5ffad160b1
commit 9f5f090f0c
57 changed files with 935 additions and 174 deletions
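The hourly Celery Beat schedule and the routing/queuing changes mentioned above are not part of the files shown in this excerpt. The following is a minimal sketch of what such a configuration can look like; the task name tasks.calculate_license_usage, the usage queue, and the broker URL are illustrative assumptions, not taken from this commit.

# Hypothetical Celery configuration sketch; task name, queue name and
# broker URL are assumptions, not taken from this commit.
from celery import Celery
from celery.schedules import crontab

app = Celery("app", broker="redis://localhost:6379/0")

# Route the usage-calculation task to its own queue so it does not compete
# with request-driven tasks.
app.conf.task_routes = {
    "tasks.calculate_license_usage": {"queue": "usage"},
}

# Trigger the usage calculation at the start of every hour via Celery Beat.
app.conf.beat_schedule = {
    "calculate-license-usage-hourly": {
        "task": "tasks.calculate_license_usage",
        "schedule": crontab(minute=0),
        "options": {"queue": "usage"},
    },
}

The added Celery Beat container would then presumably run `celery -A app beat` alongside the worker containers that consume the queues.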

View File

@@ -0,0 +1,24 @@
"""Set storage_dirty flag for all tenants
Revision ID: 02debd224316
Revises: 8fdd7f2965c1
Create Date: 2024-10-08 06:53:17.261709
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '02debd224316'
down_revision = '8fdd7f2965c1'
branch_labels = None
depends_on = None
def upgrade():
op.execute('UPDATE tenant SET storage_dirty = TRUE')
def downgrade():
pass
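The flag set by this migration is presumably consumed by the hourly usage calculation mentioned in the commit message; that consumer is not part of this excerpt. Below is a rough sketch of how dirty tenants could be picked up and reset — the task name, table layout and connection URL are assumptions.

# Hypothetical consumer of the storage_dirty flag; nothing here is taken
# from this commit.
from celery import shared_task
from sqlalchemy import create_engine, text

engine = create_engine("postgresql+psycopg2://localhost/app")  # assumed URL

@shared_task(name="tasks.calculate_license_usage")
def calculate_license_usage():
    with engine.begin() as conn:
        tenant_ids = conn.execute(
            text("SELECT id FROM tenant WHERE storage_dirty = TRUE")
        ).scalars().all()
        for tenant_id in tenant_ids:
            # Recompute storage_mb_used / embedding_mb_used for the tenant
            # here (omitted), then clear the flag so the next hourly run only
            # revisits tenants whose storage changed again.
            conn.execute(
                text("UPDATE tenant SET storage_dirty = FALSE WHERE id = :id"),
                {"id": tenant_id},
            )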

View File

@@ -0,0 +1,46 @@
"""LicenseUsage: correct mb fields to be floats iso integers
Revision ID: a678c84d5633
Revises: 02debd224316
Create Date: 2024-10-11 08:03:22.823327
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a678c84d5633'
down_revision = '02debd224316'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('license_usage', schema=None) as batch_op:
batch_op.alter_column('storage_mb_used',
existing_type=sa.INTEGER(),
type_=sa.Float(),
existing_nullable=True)
batch_op.alter_column('embedding_mb_used',
existing_type=sa.INTEGER(),
type_=sa.Float(),
existing_nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('license_usage', schema=None) as batch_op:
batch_op.alter_column('embedding_mb_used',
existing_type=sa.Float(),
type_=sa.INTEGER(),
existing_nullable=True)
batch_op.alter_column('storage_mb_used',
existing_type=sa.Float(),
type_=sa.INTEGER(),
existing_nullable=True)
# ### end Alembic commands ###

View File

@@ -124,31 +124,34 @@ def run_migrations_online():
    with connectable.connect() as connection:
        tenants = get_tenant_ids()
        for tenant in tenants:
-           logger.info(f"Migrating tenant: {tenant}")
-           # set search path on the connection, which ensures that
-           # PostgreSQL will emit all CREATE / ALTER / DROP statements
-           # in terms of this schema by default
-           connection.execute(text(f'SET search_path TO "{tenant}", public'))
-           # in SQLAlchemy v2+ the search path change needs to be committed
-           connection.commit()
+           try:
+               logger.info(f"Migrating tenant: {tenant}")
+               # set search path on the connection, which ensures that
+               # PostgreSQL will emit all CREATE / ALTER / DROP statements
+               # in terms of this schema by default
+               connection.execute(text(f'SET search_path TO "{tenant}", public'))
+               # in SQLAlchemy v2+ the search path change needs to be committed
+               connection.commit()

-           # make use of non-supported SQLAlchemy attribute to ensure
-           # the dialect reflects tables in terms of the current tenant name
-           connection.dialect.default_schema_name = str(tenant)
+               # make use of non-supported SQLAlchemy attribute to ensure
+               # the dialect reflects tables in terms of the current tenant name
+               connection.dialect.default_schema_name = str(tenant)

-           context.configure(
-               connection=connection,
-               target_metadata=get_metadata(),
-               # literal_binds=True,
-               include_object=include_object,
-           )
+               context.configure(
+                   connection=connection,
+                   target_metadata=get_metadata(),
+                   # literal_binds=True,
+                   include_object=include_object,
+               )

-           with context.begin_transaction():
-               context.run_migrations()
+               with context.begin_transaction():
+                   context.run_migrations()

-           # for checking migrate or upgrade is running
-           if getattr(config.cmd_opts, "autogenerate", False):
-               break
+               # for checking migrate or upgrade is running
+               if getattr(config.cmd_opts, "autogenerate", False):
+                   break
+           except Exception as e:
+               continue
if context.is_offline_mode():

View File

@@ -60,7 +60,7 @@ def upgrade():
except S3Error as e:
if e.code == "NoSuchKey":
current_app.logger.warning(
f"Object {doc_version.file_location} not found in bucket {doc_version.bucket_name}. Skipping.")
f"Object {doc_version.object_name} not found in bucket {doc_version.bucket_name}. Skipping.")
continue # Move to the next item
else:
raise e # Handle other types of S3 errors
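For reference, the rename means stored documents are now addressed by bucket_name / object_name when talking to object storage. A minimal sketch of that access pattern, assuming the MinIO Python client (from which the S3Error in the diff above appears to come); the endpoint, credentials and helper name are placeholders.

# Sketch only; the doc_version attributes mirror the diff above, everything
# else (endpoint, credentials, function name) is an assumption.
from minio import Minio
from minio.error import S3Error

client = Minio("localhost:9000", access_key="minio", secret_key="minio123", secure=False)

def read_document_version(doc_version):
    """Fetch the stored object via the renamed bucket_name / object_name fields."""
    try:
        response = client.get_object(doc_version.bucket_name, doc_version.object_name)
        try:
            return response.read()
        finally:
            response.close()
            response.release_conn()
    except S3Error as e:
        if e.code == "NoSuchKey":
            return None  # missing object; callers can skip it, as the migration does
        raise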