Compare commits: 72f79b1059...main

8 commits:
  988a857be4
  44a3c06e5e
  54b47f6bef
  82e7712632
  01a89a6129
  90cf7a3fe5
  ed43088637
  d4f6c05075
(modified file; name not shown in this diff — the settings below are from Alembic's ini config)
@@ -5,7 +5,7 @@
 # this is typically a path given in POSIX (e.g. forward slashes)
 # format, relative to the token %(here)s which refers to the location of this
 # ini file
-script_location = %(here)s/dev/alembic
+script_location = alembic
 # template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
 # Uncomment the line below if you want the files to be prepended with date and time
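A note on the change above: in Alembic's ini file, %(here)s expands to the directory containing the ini file itself, so the old value pointed at a dev/alembic directory while the new value resolves to the alembic/ directory added in this compare. Assuming an otherwise standard Alembic setup (nothing beyond this diff is shown), migrations would then be applied with something like:

    alembic -c alembic.ini upgrade head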
alembic/README (new file, 1 line)
@@ -0,0 +1 @@
+Generic single-database configuration.
alembic/env.py (new file, 61 lines)
@@ -0,0 +1,61 @@
from logging.config import fileConfig
import os
import sys
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context

# Add the project root to the Python path
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))

# Import your SQLAlchemy models and engine
from models import SQLModel
from database import engine

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:
    fileConfig(config.config_file_name)

# Import all your SQLModel models here so that Alembic can detect them
from models import User, DBSession

# Set the target metadata to SQLModel.metadata
target_metadata = SQLModel.metadata


def run_migrations_offline() -> None:
    """Run migrations in 'offline' mode."""
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
        compare_type=True,
    )

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online() -> None:
    """Run migrations in 'online' mode."""
    connectable = engine

    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            compare_type=True,
        )

        with context.begin_transaction():
            context.run_migrations()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
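For context, the offline/online split in env.py is standard Alembic behavior rather than anything specific to this repo: the online path connects through the imported `engine` and executes DDL directly, while the offline path renders SQL for review instead. A minimal sketch of the corresponding CLI invocations:

    # online mode: connect and run the migrations directly
    alembic upgrade head

    # offline mode: emit the SQL that would be run, without executing it
    alembic upgrade head --sql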
(new migration file, 71 lines — revision 0df481ee920b "Add PublicStream model"; file name not shown in this diff)
@@ -0,0 +1,71 @@
"""Add PublicStream model

Revision ID: 0df481ee920b
Revises: f86c93c7a872
Create Date: 2025-07-19 10:02:22.902696

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision: str = '0df481ee920b'
down_revision: Union[str, Sequence[str], None] = 'f86c93c7a872'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    """Upgrade schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    # First create the new publicstream table
    op.create_table('publicstream',
        sa.Column('uid', sa.String(), nullable=False),
        sa.Column('size', sa.Integer(), nullable=False),
        sa.Column('mtime', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('updated_at', sa.DateTime(), nullable=False),
        sa.PrimaryKeyConstraint('uid')
    )

    # Drop the foreign key constraint first
    op.drop_constraint('dbsession_user_id_fkey', 'dbsession', type_='foreignkey')

    # Then drop the unique constraint
    op.drop_constraint(op.f('uq_user_username'), 'user', type_='unique')

    # Create the new index
    op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)

    # Recreate the foreign key constraint
    op.create_foreign_key(
        'dbsession_user_id_fkey', 'dbsession', 'user',
        ['user_id'], ['username'], ondelete='CASCADE'
    )
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the foreign key constraint first
    op.drop_constraint('dbsession_user_id_fkey', 'dbsession', type_='foreignkey')

    # Drop the index
    op.drop_index(op.f('ix_user_username'), table_name='user')

    # Recreate the unique constraint
    op.create_unique_constraint(op.f('uq_user_username'), 'user', ['username'])

    # Recreate the foreign key constraint
    op.create_foreign_key(
        'dbsession_user_id_fkey', 'dbsession', 'user',
        ['user_id'], ['username'], ondelete='CASCADE'
    )

    # Drop the publicstream table
    op.drop_table('publicstream')
    # ### end Alembic commands ###
alembic/legacy_versions/1ab2db0e4b5e_make_username_unique.py (new file, 86 lines)
@@ -0,0 +1,86 @@
"""make username unique

Revision ID: 1ab2db0e4b5e
Revises:
Create Date: 2025-06-27 13:04:10.085253

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import sqlmodel


# revision identifiers, used by Alembic.
revision: str = '1ab2db0e4b5e'
down_revision: Union[str, Sequence[str], None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    """Upgrade schema."""
    # 1. First, add the unique constraint to the username column
    with op.batch_alter_table('user', schema=None) as batch_op:
        batch_op.create_unique_constraint('uq_user_username', ['username'])

    # 2. Now create the dbsession table with the foreign key
    op.create_table('dbsession',
        sa.Column('token', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
        sa.Column('user_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
        sa.Column('ip_address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
        sa.Column('user_agent', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('expires_at', sa.DateTime(), nullable=False),
        sa.Column('is_active', sa.Boolean(), nullable=False),
        sa.Column('last_activity', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['user_id'], ['user.username'], ),
        sa.PrimaryKeyConstraint('token')
    )

    # 3. Drop old tables if they exist
    if op.get_bind().engine.dialect.has_table(op.get_bind(), 'session'):
        op.drop_index(op.f('ix_session_token'), table_name='session')
        op.drop_index(op.f('ix_session_user_id'), table_name='session')
        op.drop_table('session')

    if op.get_bind().engine.dialect.has_table(op.get_bind(), 'publicstream'):
        op.drop_table('publicstream')
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade schema."""
    # 1. First drop the dbsession table
    op.drop_table('dbsession')

    # 2. Recreate the old tables
    op.create_table('publicstream',
        sa.Column('uid', sa.VARCHAR(), autoincrement=False, nullable=False),
        sa.Column('size', sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column('mtime', sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
        sa.PrimaryKeyConstraint('uid', name=op.f('publicstream_pkey'))
    )

    op.create_table('session',
        sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
        sa.Column('user_id', sa.VARCHAR(), autoincrement=False, nullable=False),
        sa.Column('token', sa.TEXT(), autoincrement=False, nullable=True),
        sa.Column('ip_address', sa.VARCHAR(), autoincrement=False, nullable=False),
        sa.Column('user_agent', sa.VARCHAR(), autoincrement=False, nullable=True),
        sa.Column('created_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
        sa.Column('expires_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
        sa.Column('last_used_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
        sa.Column('is_active', sa.BOOLEAN(), autoincrement=False, nullable=False),
        sa.PrimaryKeyConstraint('id', name=op.f('session_pkey'))
    )

    op.create_index(op.f('ix_session_user_id'), 'session', ['user_id'], unique=False)
    op.create_index(op.f('ix_session_token'), 'session', ['token'], unique=True)

    # 3. Finally, remove the unique constraint from username
    with op.batch_alter_table('user', schema=None) as batch_op:
        batch_op.drop_constraint('uq_user_username', type_='unique')
    # ### end Alembic commands ###
(new migration file, 49 lines — revision 8be4811023d8 "add_display_name_to_user"; file name not shown in this diff)
@@ -0,0 +1,49 @@
"""add_display_name_to_user

Revision ID: 8be4811023d8
Revises: 0df481ee920b
Create Date: 2025-07-19 19:46:01.129412

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
import sqlmodel


# revision identifiers, used by Alembic.
revision: str = '8be4811023d8'
down_revision: Union[str, Sequence[str], None] = '0df481ee920b'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    """Upgrade schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(op.f('dbsession_user_id_fkey'), 'dbsession', type_='foreignkey')
    op.create_foreign_key(None, 'dbsession', 'user', ['user_id'], ['username'])
    op.alter_column('publicstream', 'storage_bytes',
                    existing_type=sa.INTEGER(),
                    nullable=False,
                    existing_server_default=sa.text('0'))
    op.create_index(op.f('ix_publicstream_username'), 'publicstream', ['username'], unique=False)
    op.drop_column('publicstream', 'size')
    op.add_column('user', sa.Column('display_name', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'display_name')
    op.add_column('publicstream', sa.Column('size', sa.INTEGER(), autoincrement=False, nullable=False))
    op.drop_index(op.f('ix_publicstream_username'), table_name='publicstream')
    op.alter_column('publicstream', 'storage_bytes',
                    existing_type=sa.INTEGER(),
                    nullable=True,
                    existing_server_default=sa.text('0'))
    op.drop_constraint(None, 'dbsession', type_='foreignkey')
    op.create_foreign_key(op.f('dbsession_user_id_fkey'), 'dbsession', 'user', ['user_id'], ['username'], ondelete='CASCADE')
    # ### end Alembic commands ###
(new migration file, 30 lines — revision f86c93c7a872 "add_processed_filename_to_uploadlog"; file name not shown in this diff)
@@ -0,0 +1,30 @@
"""add_processed_filename_to_uploadlog

Revision ID: f86c93c7a872
Revises: 1ab2db0e4b5e
Create Date: 2025-06-28 15:56:29.169668

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision: str = 'f86c93c7a872'
down_revision: Union[str, Sequence[str], None] = '1ab2db0e4b5e'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    """Upgrade schema."""
    op.add_column('uploadlog',
                  sa.Column('processed_filename', sa.String(), nullable=True),
                  schema=None)


def downgrade() -> None:
    """Downgrade schema."""
    op.drop_column('uploadlog', 'processed_filename', schema=None)
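Taken together, the Revises: fields of the migration files above give the legacy revision chain, which can be read back with the standard Alembic CLI (nothing repo-specific assumed):

    1ab2db0e4b5e (make username unique)
      -> f86c93c7a872 (add_processed_filename_to_uploadlog)
      -> 0df481ee920b (Add PublicStream model)
      -> 8be4811023d8 (add_display_name_to_user)

    alembic history --verbose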
alembic/script.py.mako (new file, 28 lines)
@@ -0,0 +1,28 @@
"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

# revision identifiers, used by Alembic.
revision: str = ${repr(up_revision)}
down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)}
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}


def upgrade() -> None:
    """Upgrade schema."""
    ${upgrades if upgrades else "pass"}


def downgrade() -> None:
    """Downgrade schema."""
    ${downgrades if downgrades else "pass"}
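This Mako template is what Alembic renders whenever a new revision file is created; the migration files in this compare all follow its shape. Assuming autogenerate is wired up through the env.py shown earlier (target_metadata = SQLModel.metadata), a new revision would be produced with:

    alembic revision --autogenerate -m "describe the change"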
alembic/versions/5f0b37b50730_initial_base_schema.py (new file, 85 lines)
@@ -0,0 +1,85 @@
"""initial base schema

Revision ID: 5f0b37b50730
Revises:
Create Date: 2025-08-08 08:42:06.859256

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision: str = '5f0b37b50730'
down_revision: Union[str, Sequence[str], None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    """Upgrade schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('publicstream',
        sa.Column('uid', sa.String(), nullable=False),
        sa.Column('username', sa.String(), nullable=True),
        sa.Column('storage_bytes', sa.Integer(), nullable=False),
        sa.Column('mtime', sa.Integer(), nullable=False),
        sa.Column('last_updated', sa.DateTime(), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('updated_at', sa.DateTime(), nullable=False),
        sa.PrimaryKeyConstraint('uid')
    )
    op.create_index(op.f('ix_publicstream_username'), 'publicstream', ['username'], unique=False)
    op.create_table('uploadlog',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('uid', sa.String(), nullable=False),
        sa.Column('ip', sa.String(), nullable=False),
        sa.Column('filename', sa.String(), nullable=True),
        sa.Column('processed_filename', sa.String(), nullable=True),
        sa.Column('size_bytes', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('user',
        sa.Column('token_created', sa.DateTime(), nullable=False),
        sa.Column('email', sa.String(), nullable=False),
        sa.Column('username', sa.String(), nullable=False),
        sa.Column('token', sa.String(), nullable=False),
        sa.Column('confirmed', sa.Boolean(), nullable=False),
        sa.Column('ip', sa.String(), nullable=False),
        sa.PrimaryKeyConstraint('email')
    )
    op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
    op.create_table('userquota',
        sa.Column('uid', sa.String(), nullable=False),
        sa.Column('storage_bytes', sa.Integer(), nullable=False),
        sa.PrimaryKeyConstraint('uid')
    )
    op.create_table('dbsession',
        sa.Column('token', sa.String(), nullable=False),
        sa.Column('uid', sa.String(), nullable=False),
        sa.Column('ip_address', sa.String(), nullable=False),
        sa.Column('user_agent', sa.String(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('expires_at', sa.DateTime(), nullable=False),
        sa.Column('is_active', sa.Boolean(), nullable=False),
        sa.Column('last_activity', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['uid'], ['user.email'], ),
        sa.PrimaryKeyConstraint('token')
    )
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('dbsession')
    op.drop_table('userquota')
    op.drop_index(op.f('ix_user_username'), table_name='user')
    op.drop_table('user')
    op.drop_table('uploadlog')
    op.drop_index(op.f('ix_publicstream_username'), table_name='publicstream')
    op.drop_table('publicstream')
    # ### end Alembic commands ###
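Note that this revision has an empty Revises: field, i.e. it starts a fresh history, while the earlier chain now lives under alembic/legacy_versions/. For a database that already matches this schema, one way to adopt the new baseline (an assumption about the intended workflow, not stated in this diff) is to stamp the revision rather than run it:

    alembic stamp 5f0b37b50730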
(deleted Python file, 355 lines — database legacy data analysis script; file name not shown in this diff)
@@ -1,355 +0,0 @@
#!/usr/bin/env python3
"""
Database Legacy Data Analysis Script
Analyzes the database for legacy data that doesn't match current authentication implementation
"""

import sys
from datetime import datetime, timedelta
from sqlmodel import Session, select
from database import engine
from models import User, UserQuota, UploadLog, DBSession, PublicStream
import re


def validate_email_format(email):
    """Validate email format using RFC 5322 compliant regex"""
    pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
    return re.match(pattern, email) is not None


def analyze_user_table():
    """Analyze User table for legacy data issues"""
    print("\n=== ANALYZING USER TABLE ===")
    issues = []

    with Session(engine) as session:
        users = session.exec(select(User)).all()
        print(f"Total users: {len(users)}")

        for user in users:
            user_issues = []

            # Check if email (primary key) is valid email format
            if not validate_email_format(user.email):
                user_issues.append(f"Invalid email format: {user.email}")

            # Check if username is also email format (current requirement)
            if not validate_email_format(user.username):
                user_issues.append(f"Username not in email format: {user.username}")

            # Check if email and username match (should be same after migration)
            if user.email != user.username:
                user_issues.append(f"Email/username mismatch: email={user.email}, username={user.username}")

            # Check for missing or empty display_name
            if not user.display_name or user.display_name.strip() == "":
                user_issues.append("Empty display_name")

            # Check for very old tokens (potential security issue)
            if user.token_created < datetime.utcnow() - timedelta(days=30):
                user_issues.append(f"Very old token (created: {user.token_created})")

            # Check for unconfirmed users
            if not user.confirmed:
                user_issues.append("Unconfirmed user")

            if user_issues:
                issues.append({
                    'email': user.email,
                    'username': user.username,
                    'issues': user_issues
                })

    print(f"Users with issues: {len(issues)}")
    for issue in issues:
        print(f"  User {issue['email']}:")
        for problem in issue['issues']:
            print(f"    - {problem}")

    return issues


def analyze_session_table():
    """Analyze DBSession table for legacy data issues"""
    print("\n=== ANALYZING SESSION TABLE ===")
    issues = []

    with Session(engine) as session:
        sessions = session.exec(select(DBSession)).all()
        print(f"Total sessions: {len(sessions)}")

        active_sessions = [s for s in sessions if s.is_active]
        expired_sessions = [s for s in sessions if s.expires_at < datetime.utcnow()]
        old_sessions = [s for s in sessions if s.created_at < datetime.utcnow() - timedelta(days=7)]

        print(f"Active sessions: {len(active_sessions)}")
        print(f"Expired sessions: {len(expired_sessions)}")
        print(f"Sessions older than 7 days: {len(old_sessions)}")

        for db_session in sessions:
            session_issues = []

            # Check if user_id is in email format (current requirement)
            if not validate_email_format(db_session.user_id):
                session_issues.append(f"user_id not in email format: {db_session.user_id}")

            # Check for expired but still active sessions
            if db_session.is_active and db_session.expires_at < datetime.utcnow():
                session_issues.append(f"Expired but still marked active (expires: {db_session.expires_at})")

            # Check for very old sessions that should be cleaned up
            if db_session.created_at < datetime.utcnow() - timedelta(days=30):
                session_issues.append(f"Very old session (created: {db_session.created_at})")

            # Check for sessions with 1-hour expiry (old system)
            session_duration = db_session.expires_at - db_session.created_at
            if session_duration < timedelta(hours=2):  # Less than 2 hours indicates old 1-hour sessions
                session_issues.append(f"Short session duration: {session_duration} (should be 24h)")

            if session_issues:
                issues.append({
                    'token': db_session.token[:10] + '...',
                    'user_id': db_session.user_id,
                    'created_at': db_session.created_at,
                    'expires_at': db_session.expires_at,
                    'issues': session_issues
                })

    print(f"Sessions with issues: {len(issues)}")
    for issue in issues:
        print(f"  Session {issue['token']} (user: {issue['user_id']}):")
        for problem in issue['issues']:
            print(f"    - {problem}")

    return issues


def analyze_quota_table():
    """Analyze UserQuota table for legacy data issues"""
    print("\n=== ANALYZING USER QUOTA TABLE ===")
    issues = []

    with Session(engine) as session:
        quotas = session.exec(select(UserQuota)).all()
        print(f"Total quota records: {len(quotas)}")

        for quota in quotas:
            quota_issues = []

            # Check if uid is in email format (current requirement)
            if not validate_email_format(quota.uid):
                quota_issues.append(f"UID not in email format: {quota.uid}")

            # Check for negative storage
            if quota.storage_bytes < 0:
                quota_issues.append(f"Negative storage: {quota.storage_bytes}")

            # Check for excessive storage (over 100MB limit)
            if quota.storage_bytes > 100 * 1024 * 1024:
                quota_issues.append(f"Storage over 100MB limit: {quota.storage_bytes / (1024*1024):.1f}MB")

            if quota_issues:
                issues.append({
                    'uid': quota.uid,
                    'storage_bytes': quota.storage_bytes,
                    'issues': quota_issues
                })

    print(f"Quota records with issues: {len(issues)}")
    for issue in issues:
        print(f"  Quota {issue['uid']} ({issue['storage_bytes']} bytes):")
        for problem in issue['issues']:
            print(f"    - {problem}")

    return issues


def analyze_upload_log_table():
    """Analyze UploadLog table for legacy data issues"""
    print("\n=== ANALYZING UPLOAD LOG TABLE ===")
    issues = []

    with Session(engine) as session:
        uploads = session.exec(select(UploadLog)).all()
        print(f"Total upload records: {len(uploads)}")

        for upload in uploads:
            upload_issues = []

            # Check if uid is in email format (current requirement)
            if not validate_email_format(upload.uid):
                upload_issues.append(f"UID not in email format: {upload.uid}")

            # Check for missing processed_filename
            if not upload.processed_filename:
                upload_issues.append("Missing processed_filename")

            # Check for negative file size
            if upload.size_bytes < 0:
                upload_issues.append(f"Negative file size: {upload.size_bytes}")

            # Check for very old uploads
            if upload.created_at < datetime.utcnow() - timedelta(days=365):
                upload_issues.append(f"Very old upload (created: {upload.created_at})")

            if upload_issues:
                issues.append({
                    'id': upload.id,
                    'uid': upload.uid,
                    'filename': upload.filename,
                    'created_at': upload.created_at,
                    'issues': upload_issues
                })

    print(f"Upload records with issues: {len(issues)}")
    for issue in issues:
        print(f"  Upload {issue['id']} (user: {issue['uid']}, file: {issue['filename']}):")
        for problem in issue['issues']:
            print(f"    - {problem}")

    return issues


def analyze_public_stream_table():
    """Analyze PublicStream table for legacy data issues"""
    print("\n=== ANALYZING PUBLIC STREAM TABLE ===")
    issues = []

    with Session(engine) as session:
        streams = session.exec(select(PublicStream)).all()
        print(f"Total public stream records: {len(streams)}")

        for stream in streams:
            stream_issues = []

            # Check if uid is in email format (current requirement)
            if not validate_email_format(stream.uid):
                stream_issues.append(f"UID not in email format: {stream.uid}")

            # Check if username is also email format (should match uid)
            if stream.username and not validate_email_format(stream.username):
                stream_issues.append(f"Username not in email format: {stream.username}")

            # Check if uid and username match (should be same after migration)
            if stream.username and stream.uid != stream.username:
                stream_issues.append(f"UID/username mismatch: uid={stream.uid}, username={stream.username}")

            # Check for negative storage
            if stream.storage_bytes < 0:
                stream_issues.append(f"Negative storage: {stream.storage_bytes}")

            # Check for missing display_name
            if not stream.display_name or stream.display_name.strip() == "":
                stream_issues.append("Empty display_name")

            if stream_issues:
                issues.append({
                    'uid': stream.uid,
                    'username': stream.username,
                    'display_name': stream.display_name,
                    'issues': stream_issues
                })

    print(f"Public stream records with issues: {len(issues)}")
    for issue in issues:
        print(f"  Stream {issue['uid']} (username: {issue['username']}):")
        for problem in issue['issues']:
            print(f"    - {problem}")

    return issues


def check_referential_integrity():
    """Check for referential integrity issues between tables"""
    print("\n=== CHECKING REFERENTIAL INTEGRITY ===")
    issues = []

    with Session(engine) as session:
        # Get all unique UIDs from each table
        users = session.exec(select(User.email)).all()
        user_usernames = session.exec(select(User.username)).all()
        quotas = session.exec(select(UserQuota.uid)).all()
        uploads = session.exec(select(UploadLog.uid)).all()
        streams = session.exec(select(PublicStream.uid)).all()
        sessions = session.exec(select(DBSession.user_id)).all()

        user_emails = set(users)
        user_usernames_set = set(user_usernames)
        quota_uids = set(quotas)
        upload_uids = set(uploads)
        stream_uids = set(streams)
        session_uids = set(sessions)

        print(f"Unique user emails: {len(user_emails)}")
        print(f"Unique user usernames: {len(user_usernames_set)}")
        print(f"Unique quota UIDs: {len(quota_uids)}")
        print(f"Unique upload UIDs: {len(upload_uids)}")
        print(f"Unique stream UIDs: {len(stream_uids)}")
        print(f"Unique session user_ids: {len(session_uids)}")

        # Check for orphaned records
        orphaned_quotas = quota_uids - user_emails
        orphaned_uploads = upload_uids - user_emails
        orphaned_streams = stream_uids - user_emails
        orphaned_sessions = session_uids - user_usernames_set  # Sessions use username as user_id

        if orphaned_quotas:
            issues.append(f"Orphaned quota records (no matching user): {orphaned_quotas}")

        if orphaned_uploads:
            issues.append(f"Orphaned upload records (no matching user): {orphaned_uploads}")

        if orphaned_streams:
            issues.append(f"Orphaned stream records (no matching user): {orphaned_streams}")

        if orphaned_sessions:
            issues.append(f"Orphaned session records (no matching user): {orphaned_sessions}")

        # Check for users without quota records
        users_without_quota = user_emails - quota_uids
        if users_without_quota:
            issues.append(f"Users without quota records: {users_without_quota}")

        # Check for users without stream records
        users_without_streams = user_emails - stream_uids
        if users_without_streams:
            issues.append(f"Users without stream records: {users_without_streams}")

    print(f"Referential integrity issues: {len(issues)}")
    for issue in issues:
        print(f"  - {issue}")

    return issues


def main():
    """Run complete database legacy analysis"""
    print("=== DATABASE LEGACY DATA ANALYSIS ===")
    print(f"Analysis started at: {datetime.utcnow()}")

    all_issues = {}

    try:
        all_issues['users'] = analyze_user_table()
        all_issues['sessions'] = analyze_session_table()
        all_issues['quotas'] = analyze_quota_table()
        all_issues['uploads'] = analyze_upload_log_table()
        all_issues['streams'] = analyze_public_stream_table()
        all_issues['integrity'] = check_referential_integrity()

        # Summary
        print("\n=== SUMMARY ===")
        total_issues = sum(len(issues) if isinstance(issues, list) else 1 for issues in all_issues.values())
        print(f"Total issues found: {total_issues}")

        for table, issues in all_issues.items():
            if issues:
                count = len(issues) if isinstance(issues, list) else 1
                print(f"  {table}: {count} issues")

        if total_issues == 0:
            print("✅ No legacy data issues found! Database is clean.")
        else:
            print("⚠️ Legacy data issues found. Consider running cleanup scripts.")

    except Exception as e:
        print(f"❌ Error during analysis: {e}")
        return 1

    return 0


if __name__ == "__main__":
    sys.exit(main())
(deleted SQL file, 31 lines — cleanup script for old format user 'devuser'; file name not shown in this diff)
@@ -1,31 +0,0 @@
-- Cleanup script for old format user 'devuser'
-- This user has username-based UID instead of email-based UID

-- Show what will be deleted before deletion
SELECT 'publicstream entries to delete:' as info;
SELECT uid, username, storage_bytes, created_at FROM publicstream WHERE uid = 'devuser';

SELECT 'uploadlog entries to delete:' as info;
SELECT COUNT(*) as count, uid FROM uploadlog WHERE uid = 'devuser' GROUP BY uid;

SELECT 'userquota entries to delete:' as info;
SELECT uid FROM userquota WHERE uid = 'devuser';

-- Delete from all related tables
-- Start with dependent tables first
DELETE FROM uploadlog WHERE uid = 'devuser';
DELETE FROM userquota WHERE uid = 'devuser';
DELETE FROM publicstream WHERE uid = 'devuser';

-- Verify cleanup
SELECT 'Remaining entries for devuser in publicstream:' as info;
SELECT COUNT(*) as count FROM publicstream WHERE uid = 'devuser';

SELECT 'Remaining entries for devuser in uploadlog:' as info;
SELECT COUNT(*) as count FROM uploadlog WHERE uid = 'devuser';

SELECT 'Remaining entries for devuser in userquota:' as info;
SELECT COUNT(*) as count FROM userquota WHERE uid = 'devuser';

SELECT 'Total remaining old format entries in publicstream:' as info;
SELECT COUNT(*) as count FROM publicstream WHERE uid NOT LIKE '%@%' OR uid = username;
(deleted SQL file, 19 lines — final cleanup of orphaned entries; file name not shown in this diff)
@@ -1,19 +0,0 @@
-- Final cleanup of orphaned entries that prevent proper account deletion
-- These entries have username-based UIDs that should have been deleted

-- Show what will be deleted
SELECT 'Orphaned publicstream entries to delete:' as info;
SELECT uid, username FROM publicstream WHERE uid = 'oibchello';

SELECT 'Orphaned userquota entries to delete:' as info;
SELECT uid, storage_bytes FROM userquota WHERE uid = 'oibchello';

-- Delete the orphaned entries
DELETE FROM publicstream WHERE uid = 'oibchello';
DELETE FROM userquota WHERE uid = 'oibchello';

-- Verify cleanup
SELECT 'Remaining entries for oibchello:' as info;
SELECT 'publicstream' as table_name, COUNT(*) as count FROM publicstream WHERE uid = 'oibchello'
UNION ALL
SELECT 'userquota' as table_name, COUNT(*) as count FROM userquota WHERE uid = 'oibchello';
(deleted SQL file, 169 lines — database legacy data cleanup script; file name not shown in this diff)
@@ -1,169 +0,0 @@
-- Database Legacy Data Cleanup Script
-- Fixes issues identified in the database analysis
-- Execute these queries step by step to fix legacy data

-- =============================================================================
-- STEP 1: Fix User Table - Update username to match email format
-- =============================================================================
-- Issue: User has username 'oibchello' but email 'oib@chello.at'
-- Fix: Update username to match email (current authentication requirement)

UPDATE "user"
SET username = email,
    display_name = CASE
        WHEN display_name = '' OR display_name IS NULL
        THEN split_part(email, '@', 1)  -- Use email prefix as display name
        ELSE display_name
    END
WHERE email = 'oib@chello.at';

-- Verify the fix
SELECT email, username, display_name, confirmed FROM "user" WHERE email = 'oib@chello.at';

-- =============================================================================
-- STEP 2: Clean Up Expired Sessions
-- =============================================================================
-- Issue: 11 expired sessions still marked as active (security risk)
-- Fix: Mark expired sessions as inactive

UPDATE dbsession
SET is_active = false
WHERE expires_at < NOW() AND is_active = true;

-- Verify expired sessions are now inactive
SELECT COUNT(*) as expired_active_sessions
FROM dbsession
WHERE expires_at < NOW() AND is_active = true;

-- Optional: Delete very old expired sessions (older than 7 days)
DELETE FROM dbsession
WHERE expires_at < NOW() - INTERVAL '7 days';

-- =============================================================================
-- STEP 3: Update Session user_id to Email Format
-- =============================================================================
-- Issue: All sessions use old username format instead of email
-- Fix: Update session user_id to use email format

UPDATE dbsession
SET user_id = 'oib@chello.at'
WHERE user_id = 'oibchello';

-- Verify session user_id updates
SELECT DISTINCT user_id FROM dbsession;

-- =============================================================================
-- STEP 4: Fix PublicStream Username Fields
-- =============================================================================
-- Issue: PublicStream has username/UID mismatches
-- Fix: Update username to match UID (email format)

-- Fix the existing user record
UPDATE publicstream
SET username = uid,
    display_name = CASE
        WHEN display_name = 'oibchello'
        THEN split_part(uid, '@', 1)  -- Use email prefix as display name
        ELSE display_name
    END
WHERE uid = 'oib@chello.at';

-- Verify the fix
SELECT uid, username, display_name FROM publicstream WHERE uid = 'oib@chello.at';

-- =============================================================================
-- STEP 5: Remove Orphaned Records for Deleted User
-- =============================================================================
-- Issue: Records exist for 'oib@bubuit.net' but no user exists
-- Fix: Remove orphaned records

-- Remove orphaned quota record
DELETE FROM userquota WHERE uid = 'oib@bubuit.net';

-- Remove orphaned stream record
DELETE FROM publicstream WHERE uid = 'oib@bubuit.net';

-- Verify orphaned records are removed
SELECT 'userquota' as table_name, COUNT(*) as count FROM userquota WHERE uid = 'oib@bubuit.net'
UNION ALL
SELECT 'publicstream' as table_name, COUNT(*) as count FROM publicstream WHERE uid = 'oib@bubuit.net';

-- =============================================================================
-- VERIFICATION QUERIES
-- =============================================================================
-- Run these to verify all issues are fixed

-- 1. Check user table consistency
SELECT
    email,
    username,
    display_name,
    CASE WHEN email = username THEN '✓' ELSE '✗' END as email_username_match,
    CASE WHEN display_name != '' THEN '✓' ELSE '✗' END as has_display_name
FROM "user";

-- 2. Check session table health
SELECT
    COUNT(*) as total_sessions,
    COUNT(CASE WHEN is_active THEN 1 END) as active_sessions,
    COUNT(CASE WHEN expires_at < NOW() AND is_active THEN 1 END) as expired_but_active,
    COUNT(CASE WHEN expires_at - created_at > INTERVAL '20 hours' THEN 1 END) as long_duration_sessions
FROM dbsession;

-- 3. Check PublicStream consistency
SELECT
    uid,
    username,
    display_name,
    CASE WHEN uid = username THEN '✓' ELSE '✗' END as uid_username_match
FROM publicstream;

-- 4. Check referential integrity
SELECT
    'Users' as entity,
    COUNT(*) as count
FROM "user"
UNION ALL
SELECT
    'UserQuota records',
    COUNT(*)
FROM userquota
UNION ALL
SELECT
    'PublicStream records',
    COUNT(*)
FROM publicstream
UNION ALL
SELECT
    'Active Sessions',
    COUNT(*)
FROM dbsession WHERE is_active = true;

-- 5. Final validation - should return no rows if all issues are fixed
SELECT 'ISSUE: User email/username mismatch' as issue
FROM "user"
WHERE email != username
UNION ALL
SELECT 'ISSUE: Expired active sessions'
FROM dbsession
WHERE expires_at < NOW() AND is_active = true
LIMIT 1
UNION ALL
SELECT 'ISSUE: PublicStream UID/username mismatch'
FROM publicstream
WHERE uid != username
LIMIT 1
UNION ALL
SELECT 'ISSUE: Orphaned quota records'
FROM userquota q
LEFT JOIN "user" u ON q.uid = u.email
WHERE u.email IS NULL
LIMIT 1
UNION ALL
SELECT 'ISSUE: Orphaned stream records'
FROM publicstream p
LEFT JOIN "user" u ON p.uid = u.email
WHERE u.email IS NULL
LIMIT 1;

-- If the final query returns no rows, all legacy issues are fixed! ✅
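These step-by-step scripts carry no transaction control of their own. A cautious way to run such a file (an assumed invocation; the actual database and file names are not shown in this diff) is inside a single transaction with error stop, so an unexpected row count can still be rolled back:

    psql -d <database> -v ON_ERROR_STOP=1 --single-transaction -f cleanup_legacy_data.sql   # file name hypothetical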
(deleted SQL file, 31 lines — cleanup script for old format user 'oibchello'; file name not shown in this diff)
@@ -1,31 +0,0 @@
-- Cleanup script for old format user 'oibchello'
-- This user has username-based UID instead of email-based UID

-- Show what will be deleted before deletion
SELECT 'publicstream entries to delete:' as info;
SELECT uid, username, storage_bytes, created_at FROM publicstream WHERE uid = 'oibchello';

SELECT 'uploadlog entries to delete:' as info;
SELECT COUNT(*) as count, uid FROM uploadlog WHERE uid = 'oibchello' GROUP BY uid;

SELECT 'userquota entries to delete:' as info;
SELECT uid FROM userquota WHERE uid = 'oibchello';

-- Delete from all related tables
-- Start with dependent tables first
DELETE FROM uploadlog WHERE uid = 'oibchello';
DELETE FROM userquota WHERE uid = 'oibchello';
DELETE FROM publicstream WHERE uid = 'oibchello';

-- Verify cleanup
SELECT 'Remaining entries for oibchello in publicstream:' as info;
SELECT COUNT(*) as count FROM publicstream WHERE uid = 'oibchello';

SELECT 'Remaining entries for oibchello in uploadlog:' as info;
SELECT COUNT(*) as count FROM uploadlog WHERE uid = 'oibchello';

SELECT 'Remaining entries for oibchello in userquota:' as info;
SELECT COUNT(*) as count FROM userquota WHERE uid = 'oibchello';

SELECT 'Total remaining old format entries in publicstream:' as info;
SELECT COUNT(*) as count FROM publicstream WHERE uid NOT LIKE '%@%' OR uid = username;
(deleted SQL file, 28 lines — cleanup script for old format user entries; file name not shown in this diff)
@@ -1,28 +0,0 @@
-- Cleanup script for old format user entries
-- Removes users with username-based UIDs instead of email-based UIDs

-- Show what will be deleted before deletion
SELECT 'publicstream entries to delete:' as info;
SELECT uid, username, storage_bytes, created_at FROM publicstream WHERE uid IN ('devuser', 'oibchello');

SELECT 'uploadlog entries to delete:' as info;
SELECT COUNT(*) as count, uid FROM uploadlog WHERE uid IN ('devuser', 'oibchello') GROUP BY uid;

SELECT 'userquota entries to delete:' as info;
SELECT uid, quota_bytes, used_bytes FROM userquota WHERE uid IN ('devuser', 'oibchello');

-- Delete from all related tables
-- Start with dependent tables first
DELETE FROM uploadlog WHERE uid IN ('devuser', 'oibchello');
DELETE FROM userquota WHERE uid IN ('devuser', 'oibchello');
DELETE FROM publicstream WHERE uid IN ('devuser', 'oibchello');

-- Verify cleanup
SELECT 'Remaining old format entries in publicstream:' as info;
SELECT COUNT(*) as count FROM publicstream WHERE uid NOT LIKE '%@%' OR uid = username;

SELECT 'Remaining old format entries in uploadlog:' as info;
SELECT COUNT(*) as count FROM uploadlog WHERE uid NOT LIKE '%@%';

SELECT 'Remaining old format entries in userquota:' as info;
SELECT COUNT(*) as count FROM userquota WHERE uid NOT LIKE '%@%';
(deleted SQL file, 17 lines — cleanup script for orphaned uploadlog entries; file name not shown in this diff)
@@ -1,17 +0,0 @@
-- Cleanup script for orphaned uploadlog entries
-- These entries have username-based UIDs that should have been deleted with the user

-- Show what will be deleted
SELECT 'Orphaned uploadlog entries to delete:' as info;
SELECT uid, filename, processed_filename, created_at FROM uploadlog WHERE uid = 'oibchello';

-- Delete the orphaned entries
DELETE FROM uploadlog WHERE uid = 'oibchello';

-- Verify cleanup
SELECT 'Remaining uploadlog entries for oibchello:' as info;
SELECT COUNT(*) as count FROM uploadlog WHERE uid = 'oibchello';

-- Show all remaining uploadlog entries
SELECT 'All remaining uploadlog entries:' as info;
SELECT uid, filename, created_at FROM uploadlog ORDER BY created_at DESC;
(deleted SQL file, 6 lines — cleanup of remaining orphaned uploadlog entries for devuser; file name not shown in this diff)
@@ -1,6 +0,0 @@
-- Cleanup remaining orphaned uploadlog entries for devuser
DELETE FROM uploadlog WHERE uid = 'devuser';

-- Verify cleanup
SELECT 'All remaining uploadlog entries after cleanup:' as info;
SELECT uid, filename, created_at FROM uploadlog ORDER BY created_at DESC;
(deleted SQL file, 307 lines — PostgreSQL database dump; file name not shown in this diff, and the listing ends partway through the file)
@@ -1,307 +0,0 @@
--
-- PostgreSQL database dump
--

-- Dumped from database version 15.13 (Debian 15.13-0+deb12u1)
-- Dumped by pg_dump version 15.13 (Debian 15.13-0+deb12u1)

SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET xmloption = content;
SET client_min_messages = warning;
SET row_security = off;

SET default_tablespace = '';

SET default_table_access_method = heap;

--
-- Name: alembic_version; Type: TABLE; Schema: public; Owner: d2s
--

CREATE TABLE public.alembic_version (
    version_num character varying(32) NOT NULL
);


ALTER TABLE public.alembic_version OWNER TO d2s;

--
-- Name: dbsession; Type: TABLE; Schema: public; Owner: d2s
--

CREATE TABLE public.dbsession (
    token character varying NOT NULL,
    uid character varying NOT NULL,
    ip_address character varying NOT NULL,
    user_agent character varying NOT NULL,
    created_at timestamp without time zone NOT NULL,
    expires_at timestamp without time zone NOT NULL,
    is_active boolean NOT NULL,
    last_activity timestamp without time zone NOT NULL
);


ALTER TABLE public.dbsession OWNER TO d2s;

--
-- Name: publicstream; Type: TABLE; Schema: public; Owner: d2s
--

CREATE TABLE public.publicstream (
    uid character varying NOT NULL,
    username character varying,
    storage_bytes integer NOT NULL,
    mtime integer NOT NULL,
    last_updated timestamp without time zone,
    created_at timestamp without time zone NOT NULL,
    updated_at timestamp without time zone NOT NULL
);


ALTER TABLE public.publicstream OWNER TO d2s;

--
-- Name: uploadlog; Type: TABLE; Schema: public; Owner: d2s
--

CREATE TABLE public.uploadlog (
    id integer NOT NULL,
    uid character varying NOT NULL,
    ip character varying NOT NULL,
    filename character varying,
    processed_filename character varying,
    size_bytes integer NOT NULL,
    created_at timestamp without time zone NOT NULL
);


ALTER TABLE public.uploadlog OWNER TO d2s;

--
-- Name: uploadlog_id_seq; Type: SEQUENCE; Schema: public; Owner: d2s
--

CREATE SEQUENCE public.uploadlog_id_seq
    AS integer
    START WITH 1
    INCREMENT BY 1
    NO MINVALUE
    NO MAXVALUE
    CACHE 1;


ALTER TABLE public.uploadlog_id_seq OWNER TO d2s;

--
-- Name: uploadlog_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: d2s
--

ALTER SEQUENCE public.uploadlog_id_seq OWNED BY public.uploadlog.id;


--
-- Name: user; Type: TABLE; Schema: public; Owner: d2s
--

CREATE TABLE public."user" (
    token_created timestamp without time zone NOT NULL,
    email character varying NOT NULL,
    username character varying NOT NULL,
    token character varying NOT NULL,
    confirmed boolean NOT NULL,
    ip character varying NOT NULL
);


ALTER TABLE public."user" OWNER TO d2s;

--
-- Name: userquota; Type: TABLE; Schema: public; Owner: d2s
--

CREATE TABLE public.userquota (
    uid character varying NOT NULL,
    storage_bytes integer NOT NULL
);


ALTER TABLE public.userquota OWNER TO d2s;

--
-- Name: uploadlog id; Type: DEFAULT; Schema: public; Owner: d2s
--

ALTER TABLE ONLY public.uploadlog ALTER COLUMN id SET DEFAULT nextval('public.uploadlog_id_seq'::regclass);


--
-- Data for Name: alembic_version; Type: TABLE DATA; Schema: public; Owner: d2s
--

COPY public.alembic_version (version_num) FROM stdin;
\.


--
-- Data for Name: dbsession; Type: TABLE DATA; Schema: public; Owner: d2s
--

COPY public.dbsession (token, uid, ip_address, user_agent, created_at, expires_at, is_active, last_activity) FROM stdin;
6Y3PfCj-Mk3qLRttXCul8GTFZU9XWZtoHjk9I4EqnTE	oib@chello.at	127.0.0.1	Mozilla/5.0 (X11; Linux x86_64; rv:128.0) Gecko/20100101 Firefox/128.0	2025-08-06 10:32:21.725005	2025-08-07 10:32:21.724909	t	2025-08-06 10:32:21.725012
uGnwnfsAUzbNJZoqYsbT__tVxqfl4NtOD04UKYp8FEY	oib@chello.at	127.0.0.1	Mozilla/5.0 (X11; Linux x86_64; rv:128.0) Gecko/20100101 Firefox/128.0	2025-08-06 10:35:43.931018	2025-08-07 10:35:43.930918	t	2025-08-06 10:35:43.931023
OmKl-RrM8D4624xmNQigD3tdG4aXq8CzUq7Ch0qEhP4	oib@chello.at	127.0.0.1	Mozilla/5.0 (X11; Linux x86_64; rv:128.0) Gecko/20100101 Firefox/128.0	2025-08-06 10:36:02.758938	2025-08-07 10:36:02.758873	t	2025-08-06 10:36:02.758941
gGpgdAbmpwY3a-zY1Ri92l7hUEjg-GyIt1o2kIDwBE8	oib@chello.at	127.0.0.1	Mozilla/5.0 (X11; Linux x86_64; rv:128.0) Gecko/20100101 Firefox/128.0	2025-08-06 10:45:59.701084	2025-08-07 10:45:59.70098	t	2025-08-06 10:45:59.701091
GT9OKNxnhThcFXKvMBBVop7kczUH-4fE4bkCcRd17xE	oib@chello.at	127.0.0.1	Mozilla/5.0 (X11; Linux x86_64; rv:128.0) Gecko/20100101 Firefox/128.0	2025-08-06 10:46:14.181147	2025-08-07 10:46:14.181055	t	2025-08-06 10:46:14.181152
Ok0mwpRLa5Fuimt9eN0l-xUaaCmpipokTkOILSxJNuA	oib@chello.at	127.0.0.1	Mozilla/5.0 (X11; Linux x86_64; rv:128.0) Gecko/20100101 Firefox/128.0	2025-08-06 10:46:27.910441	2025-08-07 10:46:27.91036	t	2025-08-06 10:46:27.910444
DCTd4zCq_Lp_GxdwI14hFwZiDjfvNVvQrUVznllTdIA	oib@chello.at	127.0.0.1	Mozilla/5.0 (X11; Linux x86_64; rv:128.0) Gecko/20100101 Firefox/128.0	2025-08-06 10:46:35.928008	2025-08-07 10:46:35.927945	t	2025-08-06 10:46:35.928011
dtv0uti4QUudgMTnS1NRzZ9nD9vhLO1stM5bdXL4I1o	oib@chello.at	127.0.0.1	Mozilla/5.0 (X11; Linux x86_64; rv:128.0) Gecko/20100101 Firefox/128.0	2025-08-06 10:46:36.104031	2025-08-07 10:46:36.103944	t	2025-08-06 10:46:36.104034
NHZQSW6C2H-5Wq6Un6NqcAmnfSt1PqJeYJnwFKSjAss	oib@chello.at	127.0.0.1	Mozilla/5.0 (X11; Linux x86_64; rv:128.0) Gecko/20100101 Firefox/128.0	2025-08-06 10:51:33.897379	2025-08-07 10:51:33.897295	t	2025-08-06 10:51:33.897385
yYZeeLyXmwpyr8Uu1szIyyoIpLc7qiWfQwB57f4kqNI	oib@chello.at	127.0.0.1	Mozilla/5.0 (X11; Linux x86_64; rv:128.0) Gecko/20100101 Firefox/128.0	2025-08-06 10:53:43.711315	2025-08-07 10:53:43.711223	t	2025-08-06 10:53:43.71132
KhH9FO4D15l3-SUUkFHjR5Oj1N6Ld-NLmkzaM1QMhtU	oib@chello.at	127.0.0.1	Mozilla/5.0 (X11; Linux x86_64; rv:128.0) Gecko/20100101 Firefox/128.0	2025-08-06 10:56:22.050456	2025-08-07 10:56:22.050377	t	2025-08-06 10:56:22.050461
zPQqqHEY4l7ZhLrBPBnvQdsQhQj1_j0n9H6CCnIAME8	oib@chello.at	127.0.0.1	Mozilla/5.0 (X11; Linux x86_64; rv:128.0) Gecko/20100101 Firefox/128.0	2025-08-06 11:29:49.412786	2025-08-07 11:29:49.412706	t	2025-08-06 11:29:49.412792
oxYZ9qTaezYliV6UtsI62RpPClj7rIAVXK_1FB3gYMQ	oib@chello.at	127.0.0.1	Mozilla/5.0 (X11; Linux x86_64; rv:128.0) Gecko/20100101 Firefox/128.0	2025-08-06 11:34:42.099366	2025-08-07 11:34:42.099276	t	2025-08-06 11:34:42.099371
Ml6aHvae2EPXs9SWZX1BI_mNKgasjIVRMWnUSwKwixQ	oib@chello.at	127.0.0.1	Mozilla/5.0 (X11; Linux x86_64; rv:128.0) Gecko/20100101 Firefox/128.0	2025-08-06 11:38:06.002942	2025-08-07 11:38:06.002845	t	2025-08-06 11:38:06.002949
\.


--
-- Data for Name: publicstream; Type: TABLE DATA; Schema: public; Owner: d2s
--

COPY public.publicstream (uid, username, storage_bytes, mtime, last_updated, created_at, updated_at) FROM stdin;
oib@chello.at	oibchello	16151127	1754453233	2025-08-06 06:22:53.97839	2025-08-06 06:07:13.525122	2025-08-06 06:07:13.525126
\.


--
-- Data for Name: uploadlog; Type: TABLE DATA; Schema: public; Owner: d2s
--

COPY public.uploadlog (id, uid, ip, filename, processed_filename, size_bytes, created_at) FROM stdin;
111	oib@chello.at	127.0.0.1	Taös - Bobstep [ Dubstep ] [1YGV5cNJrt0].opus	210388e1-2a9b-4b7c-a72f-d4059111ee80.opus	688750	2025-08-06 06:22:53.970258
112	oib@chello.at	backfilled	107_5e6c3567-7457-48f4-83fc-f3073f065718.opus	107_5e6c3567-7457-48f4-83fc-f3073f065718.opus	671050	2025-08-06 08:14:43.312825
99	oib@chello.at	127.0.0.1	Pendulum - Set Me On Fire (Rasta Dubstep Rastep Raggastep) [ndShSlWMaeA].opus	b0afe675-de49-43eb-ab77-86e592934342.opus	1051596	2025-08-06 06:07:13.504649
100	oib@chello.at	127.0.0.1	Roots Reggae (1976) [Unreleased Album] Judah Khamani - Twelve Gates of Rebirth [94NDoPCjRL0].opus	6e0e4d7c-31a6-4d3b-ad26-1ccb8aeaaf55.opus	4751764	2025-08-06 06:08:00.96213
101	oib@chello.at	backfilled	98_15ba146a-8285-4233-9d44-e77e5fc19cd6.opus	98_15ba146a-8285-4233-9d44-e77e5fc19cd6.opus	805775	2025-08-06 08:05:27.805988
102	oib@chello.at	backfilled	97_74e975bf-22f8-4b98-8111-dbcd195a62a2.opus	97_74e975bf-22f8-4b98-8111-dbcd195a62a2.opus	775404	2025-08-06 07:57:50.570271
103	oib@chello.at	backfilled	99_b0afe675-de49-43eb-ab77-86e592934342.opus	99_b0afe675-de49-43eb-ab77-86e592934342.opus	1051596	2025-08-06 08:07:13.493002
104	oib@chello.at	backfilled	100_6e0e4d7c-31a6-4d3b-ad26-1ccb8aeaaf55.opus	100_6e0e4d7c-31a6-4d3b-ad26-1ccb8aeaaf55.opus	4751764	2025-08-06 08:08:00.944561
105	oib@chello.at	backfilled	stream.opus	stream.opus	7384026	2025-08-06 08:08:01.540555
106	oib@chello.at	127.0.0.1	Roots Reggae (1973) [Unreleased Album] Judah Khamani - Scrolls of the Fire Lion🔥 [wZvlYr5Baa8].opus	516c2ea1-6bf3-4461-91c6-e7c47e913743.opus	4760432	2025-08-06 06:14:17.072377
107	oib@chello.at	127.0.0.1	Reggae Shark Dubstep remix [101PfefUH5A].opus	5e6c3567-7457-48f4-83fc-f3073f065718.opus	671050	2025-08-06 06:14:43.326351
108	oib@chello.at	127.0.0.1	SiriuX - RastaFari (Dubstep REMIX) [VVAWgX0IgxY].opus	25aa73c3-2a9c-4659-835d-8280a0381dc4.opus	939266	2025-08-06 06:17:55.519608
109	oib@chello.at	127.0.0.1	I'm Death, Straight Up | DEATH WHISTLE (Wubbaduck x Auphinity DUBSTEP REMIX) [BK6_6RB2h64].opus	9c9b6356-d5b7-427f-9179-942593cd97e6.opus	805775	2025-08-06 06:19:41.29278
|
|
||||||
110 oib@chello.at 127.0.0.1 N.A.S.A. Way Down (feat. RZA, Barbie Hatch, & John Frusciante).mp3 72c4ce3e-c991-4fb4-b5ab-b2f83b6f616d.opus 901315 2025-08-06 06:22:01.727741
|
|
||||||
113 oib@chello.at backfilled 110_72c4ce3e-c991-4fb4-b5ab-b2f83b6f616d.opus 110_72c4ce3e-c991-4fb4-b5ab-b2f83b6f616d.opus 901315 2025-08-06 08:22:01.71671
|
|
||||||
114 oib@chello.at backfilled 108_25aa73c3-2a9c-4659-835d-8280a0381dc4.opus 108_25aa73c3-2a9c-4659-835d-8280a0381dc4.opus 939266 2025-08-06 08:17:55.511047
|
|
||||||
115 oib@chello.at backfilled 106_516c2ea1-6bf3-4461-91c6-e7c47e913743.opus 106_516c2ea1-6bf3-4461-91c6-e7c47e913743.opus 4760432 2025-08-06 08:14:17.057068
|
|
||||||
116 oib@chello.at backfilled 109_9c9b6356-d5b7-427f-9179-942593cd97e6.opus 109_9c9b6356-d5b7-427f-9179-942593cd97e6.opus 805775 2025-08-06 08:19:41.282058
|
|
||||||
117 oib@chello.at backfilled 111_210388e1-2a9b-4b7c-a72f-d4059111ee80.opus 111_210388e1-2a9b-4b7c-a72f-d4059111ee80.opus 688750 2025-08-06 08:22:53.960209
|
|
||||||
\.
|
|
||||||
|
|
||||||
|
|
||||||
--
|
|
||||||
-- Data for Name: user; Type: TABLE DATA; Schema: public; Owner: d2s
|
|
||||||
--
|
|
||||||
|
|
||||||
COPY public."user" (token_created, email, username, token, confirmed, ip) FROM stdin;
|
|
||||||
2025-08-06 11:37:50.164201 oib@chello.at oibchello 69aef338-4f18-44b2-96bb-403245901d06 t 127.0.0.1
|
|
||||||
\.
|
|
||||||
|
|
||||||
|
|
||||||
--
|
|
||||||
-- Data for Name: userquota; Type: TABLE DATA; Schema: public; Owner: d2s
|
|
||||||
--
|
|
||||||
|
|
||||||
COPY public.userquota (uid, storage_bytes) FROM stdin;
|
|
||||||
oib@chello.at 16151127
|
|
||||||
\.
|
|
||||||
|
|
||||||
|
|
||||||
--
|
|
||||||
-- Name: uploadlog_id_seq; Type: SEQUENCE SET; Schema: public; Owner: d2s
|
|
||||||
--
|
|
||||||
|
|
||||||
SELECT pg_catalog.setval('public.uploadlog_id_seq', 117, true);
|
|
||||||
|
|
||||||
|
|
||||||
--
|
|
||||||
-- Name: alembic_version alembic_version_pkc; Type: CONSTRAINT; Schema: public; Owner: d2s
|
|
||||||
--
|
|
||||||
|
|
||||||
ALTER TABLE ONLY public.alembic_version
|
|
||||||
ADD CONSTRAINT alembic_version_pkc PRIMARY KEY (version_num);
|
|
||||||
|
|
||||||
|
|
||||||
--
|
|
||||||
-- Name: dbsession dbsession_pkey; Type: CONSTRAINT; Schema: public; Owner: d2s
|
|
||||||
--
|
|
||||||
|
|
||||||
ALTER TABLE ONLY public.dbsession
|
|
||||||
ADD CONSTRAINT dbsession_pkey PRIMARY KEY (token);
|
|
||||||
|
|
||||||
|
|
||||||
--
|
|
||||||
-- Name: publicstream publicstream_pkey; Type: CONSTRAINT; Schema: public; Owner: d2s
|
|
||||||
--
|
|
||||||
|
|
||||||
ALTER TABLE ONLY public.publicstream
|
|
||||||
ADD CONSTRAINT publicstream_pkey PRIMARY KEY (uid);
|
|
||||||
|
|
||||||
|
|
||||||
--
|
|
||||||
-- Name: uploadlog uploadlog_pkey; Type: CONSTRAINT; Schema: public; Owner: d2s
|
|
||||||
--
|
|
||||||
|
|
||||||
ALTER TABLE ONLY public.uploadlog
|
|
||||||
ADD CONSTRAINT uploadlog_pkey PRIMARY KEY (id);
|
|
||||||
|
|
||||||
|
|
||||||
--
|
|
||||||
-- Name: user user_pkey; Type: CONSTRAINT; Schema: public; Owner: d2s
|
|
||||||
--
|
|
||||||
|
|
||||||
ALTER TABLE ONLY public."user"
|
|
||||||
ADD CONSTRAINT user_pkey PRIMARY KEY (email);
|
|
||||||
|
|
||||||
|
|
||||||
--
|
|
||||||
-- Name: userquota userquota_pkey; Type: CONSTRAINT; Schema: public; Owner: d2s
|
|
||||||
--
|
|
||||||
|
|
||||||
ALTER TABLE ONLY public.userquota
|
|
||||||
ADD CONSTRAINT userquota_pkey PRIMARY KEY (uid);
|
|
||||||
|
|
||||||
|
|
||||||
--
|
|
||||||
-- Name: ix_publicstream_username; Type: INDEX; Schema: public; Owner: d2s
|
|
||||||
--
|
|
||||||
|
|
||||||
CREATE INDEX ix_publicstream_username ON public.publicstream USING btree (username);
|
|
||||||
|
|
||||||
|
|
||||||
--
|
|
||||||
-- Name: ix_user_username; Type: INDEX; Schema: public; Owner: d2s
|
|
||||||
--
|
|
||||||
|
|
||||||
CREATE UNIQUE INDEX ix_user_username ON public."user" USING btree (username);
|
|
||||||
|
|
||||||
|
|
||||||
--
|
|
||||||
-- Name: dbsession dbsession_user_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: d2s
|
|
||||||
--
|
|
||||||
|
|
||||||
ALTER TABLE ONLY public.dbsession
|
|
||||||
ADD CONSTRAINT dbsession_user_id_fkey FOREIGN KEY (uid) REFERENCES public."user"(email);
|
|
||||||
|
|
||||||
|
|
||||||
--
|
|
||||||
-- PostgreSQL database dump complete
|
|
||||||
--
|
|
||||||
|
|
@@ -1,131 +0,0 @@
# Authentication Logic Consolidation

## Overview

The authentication logic has been consolidated from multiple scattered files into a single, centralized `AuthManager` class. This improves maintainability, reduces code duplication, and provides a consistent authentication interface.
## Files Changed

### 1. New Centralized Module
- **`static/auth-manager.js`** - New centralized authentication manager class

### 2. Refactored Files
- **`static/auth.js`** - Simplified to use AuthManager
- **`static/magic-login.js`** - Simplified to use AuthManager
- **`static/cleanup-auth.js`** - Simplified to use AuthManager
## AuthManager Features

### Core Functionality
- **Centralized State Management** - Single source of truth for authentication state
- **Cookie & localStorage Management** - Consistent handling of auth data storage
- **Magic Link Processing** - Handles both URL-based and token-based magic login
- **Authentication Polling** - Periodic state checks with caching and debouncing
- **User Session Management** - Login, logout, and account deletion

### Key Methods
- `initialize()` - Initialize the auth manager and handle magic login
- `setAuthState(email, username, token)` - Set authentication state
- `clearAuthState()` - Clear all authentication data
- `isAuthenticated()` - Check current authentication status
- `getCurrentUser()` - Get current user data
- `logout()` - Perform logout and redirect
- `deleteAccount()` - Handle account deletion
- `cleanupAuthState(email)` - Clean up inconsistent auth state

A minimal usage sketch is shown below.
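The sketch assumes `auth-manager.js` exports a singleton named `authManager`; the exact export shape is not documented here, so treat the import as illustrative:

```javascript
// Illustrative only -- assumes auth-manager.js exports a singleton `authManager`.
import { authManager } from './auth-manager.js';

async function onPageLoad() {
  // Handles magic-link parameters and restores any persisted session.
  await authManager.initialize();

  if (authManager.isAuthenticated()) {
    const user = authManager.getCurrentUser();
    console.log(`Logged in as ${user.email}`);
  } else {
    console.log('Not authenticated');
  }
}

onPageLoad();
```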
### Authentication Flow
1. **Magic Login Detection** - Checks URL parameters for login tokens/success
2. **User Info Retrieval** - Fetches email from `/api/me` endpoint
3. **State Setting** - Sets email as primary UID, username for display
4. **UI Updates** - Updates body classes and initializes user session
5. **Navigation** - Redirects to user profile page

A sketch of this flow is shown below.
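The `/api/me` endpoint is named in this document, but the query-parameter name, response fields, and redirect target below are assumptions; this is a sketch, not the actual `auth-manager.js` code:

```javascript
// Hypothetical sketch of the token-based magic-login flow described above.
async function processTokenLogin() {
  const params = new URLSearchParams(window.location.search);
  const token = params.get('token');                 // 1. magic login detection
  if (!token) return;

  const res = await fetch('/api/me', {               // 2. user info retrieval
    headers: { Authorization: `Bearer ${token}` },
  });
  if (!res.ok) return;

  const { email, username } = await res.json();
  authManager.setAuthState(email, username, token);  // 3. email as primary UID
  document.body.classList.add('authenticated');      // 4. UI updates
  window.location.href = `/${username}`;             // 5. redirect to profile page
}
```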
## Data Storage Strategy

### localStorage Keys
- `uid` - Primary identifier (email-based)
- `user_email` - Explicit email storage
- `username` - Display name (separate from UID)
- `authToken` - Authentication token
- `isAuthenticated` - Boolean authentication state
- `uid_time` - Session timestamp

### Cookie Strategy
- `uid` - Email-based UID with `SameSite=Lax`
- `authToken` - Auth token with `SameSite=Lax; Secure`
- `isAuthenticated` - Boolean flag with `SameSite=Lax`

A persistence sketch is shown below.
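Only the key names and the `SameSite`/`Secure` attributes come from this document; the cookie `Path` and the exact serialization are assumptions:

```javascript
// Hypothetical persistence sketch for the storage strategy above.
function setAuthState(email, username, token) {
  localStorage.setItem('uid', email);                // email is the primary UID
  localStorage.setItem('user_email', email);
  localStorage.setItem('username', username);        // display name, separate from UID
  localStorage.setItem('authToken', token);
  localStorage.setItem('isAuthenticated', 'true');
  localStorage.setItem('uid_time', String(Date.now()));

  document.cookie = `uid=${encodeURIComponent(email)}; Path=/; SameSite=Lax`;
  document.cookie = `authToken=${encodeURIComponent(token)}; Path=/; SameSite=Lax; Secure`;
  document.cookie = `isAuthenticated=true; Path=/; SameSite=Lax`;
}
```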
## Removed Redundancy

### Eliminated Duplicate Code
- **User info fetching** - Centralized in `fetchUserInfo()`
- **Auth state setting** - Centralized in `setAuthState()`
- **Cookie management** - Centralized in `setAuthState()` and `clearAuthState()`
- **Magic login processing** - Centralized in `processMagicLogin()` and `processTokenLogin()`

### Removed Fields
- `confirmed_uid` - Was a duplicate of `uid`, now eliminated
## Backward Compatibility

### Global Functions (Legacy Support)
- `window.getCurrentUser()` - Get current user data
- `window.isAuthenticated()` - Check authentication status
- `window.logout()` - Perform logout
- `window.cleanupAuthState(email)` - Clean up auth state

### Existing Function Exports
- `initMagicLogin()` - Maintained in magic-login.js for compatibility
- `cleanupAuthState()` - Maintained in cleanup-auth.js for compatibility

A sketch of the delegation pattern is shown below.
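One plausible way these globals can delegate to the manager (a sketch, not the actual contents of the refactored files):

```javascript
// Hypothetical compatibility shim: legacy globals forwarding to authManager.
window.getCurrentUser = () => authManager.getCurrentUser();
window.isAuthenticated = () => authManager.isAuthenticated();
window.logout = () => authManager.logout();
window.cleanupAuthState = (email) => authManager.cleanupAuthState(email);
```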
## Benefits Achieved

### 1. **Maintainability**
- Single source of authentication logic
- Consistent error handling and logging
- Easier to debug and modify

### 2. **Performance**
- Reduced code duplication
- Optimized caching and debouncing
- Fewer redundant API calls

### 3. **Reliability**
- Consistent state management
- Proper cleanup on logout
- Robust error handling

### 4. **Security**
- Consistent cookie security attributes
- Proper state clearing on logout
- Centralized validation
## Migration Notes

### For Developers
- Import `authManager` from `./auth-manager.js` for new code
- Use `authManager.isAuthenticated()` instead of manual checks
- Use `authManager.getCurrentUser()` for user data
- Legacy global functions still work for existing code

### Testing
- Test magic link login (both URL and token-based)
- Test authentication state persistence
- Test logout and account deletion
- Test authentication polling and state changes
## Future Improvements

### Potential Enhancements
1. **Token Refresh** - Automatic token renewal
2. **Session Timeout** - Configurable session expiration
3. **Multi-tab Sync** - Better cross-tab authentication sync
4. **Audit Logging** - Enhanced authentication event logging
5. **Rate Limiting** - Protection against auth abuse

### Configuration Options
Consider adding configuration for:
- Polling intervals
- Cache TTL values
- Debug logging levels
- Cookie security settings

A sketch of such a configuration object is shown below.
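Every key and default in this sketch is hypothetical; it only illustrates the shape such a config could take:

```javascript
// Hypothetical AuthManager configuration block.
const authManagerConfig = {
  pollIntervalMs: 30000,     // how often to re-check authentication state
  cacheTtlMs: 5000,          // how long a cached auth check stays fresh
  debugLogging: false,       // verbose logging toggle
  cookieAttributes: 'Path=/; SameSite=Lax; Secure',
};
```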
@@ -1,221 +0,0 @@
#!/usr/bin/env python3
"""
Execute Database Legacy Data Cleanup
Fixes issues identified in the database analysis using direct SQL execution
"""

import sys
from sqlmodel import Session, text
from database import engine

def execute_step(session, step_name, query, description):
    """Execute a cleanup step and report results"""
    print(f"\n=== {step_name} ===")
    print(f"Description: {description}")
    print(f"Query: {query}")

    try:
        result = session.exec(text(query))
        if query.strip().upper().startswith('SELECT'):
            rows = result.fetchall()
            print(f"Result: {len(rows)} rows")
            for row in rows:
                print(f"  {row}")
        else:
            session.commit()
            print(f"✅ Success: {result.rowcount} rows affected")
        return True
    except Exception as e:
        print(f"❌ Error: {e}")
        session.rollback()
        return False

def main():
    """Execute database cleanup step by step"""
    print("=== DATABASE LEGACY DATA CLEANUP ===")

    with Session(engine) as session:
        success_count = 0
        total_steps = 0

        # Step 1: Fix User Table - Update username to match email format
        total_steps += 1
        if execute_step(
            session,
            "STEP 1: Fix User Table",
            """UPDATE "user"
               SET username = email,
                   display_name = CASE
                       WHEN display_name = '' OR display_name IS NULL
                       THEN split_part(email, '@', 1)
                       ELSE display_name
                   END
               WHERE email = 'oib@chello.at'""",
            "Update username to match email format and set display_name"
        ):
            success_count += 1

        # Verify Step 1
        execute_step(
            session,
            "VERIFY STEP 1",
            """SELECT email, username, display_name, confirmed
               FROM "user" WHERE email = 'oib@chello.at'""",
            "Verify user table fix"
        )

        # Step 2: Clean Up Expired Sessions
        total_steps += 1
        if execute_step(
            session,
            "STEP 2: Mark Expired Sessions Inactive",
            """UPDATE dbsession
               SET is_active = false
               WHERE expires_at < NOW() AND is_active = true""",
            "Mark expired sessions as inactive for security"
        ):
            success_count += 1

        # Verify Step 2
        execute_step(
            session,
            "VERIFY STEP 2",
            """SELECT COUNT(*) as expired_active_sessions
               FROM dbsession
               WHERE expires_at < NOW() AND is_active = true""",
            "Check for remaining expired active sessions"
        )

        # Step 3: Update Session user_id to Email Format
        total_steps += 1
        if execute_step(
            session,
            "STEP 3: Update Session user_id",
            """UPDATE dbsession
               SET user_id = 'oib@chello.at'
               WHERE user_id = 'oibchello'""",
            "Update session user_id to use email format"
        ):
            success_count += 1

        # Verify Step 3
        execute_step(
            session,
            "VERIFY STEP 3",
            """SELECT DISTINCT user_id FROM dbsession""",
            "Check session user_id values"
        )

        # Step 4: Fix PublicStream Username Fields
        total_steps += 1
        if execute_step(
            session,
            "STEP 4: Fix PublicStream",
            """UPDATE publicstream
               SET username = uid,
                   display_name = CASE
                       WHEN display_name = 'oibchello'
                       THEN split_part(uid, '@', 1)
                       ELSE display_name
                   END
               WHERE uid = 'oib@chello.at'""",
            "Update PublicStream username to match UID"
        ):
            success_count += 1

        # Verify Step 4
        execute_step(
            session,
            "VERIFY STEP 4",
            """SELECT uid, username, display_name
               FROM publicstream WHERE uid = 'oib@chello.at'""",
            "Verify PublicStream fix"
        )

        # Step 5: Remove Orphaned Records
        total_steps += 1
        orphan_success = True

        # Remove orphaned quota record
        if not execute_step(
            session,
            "STEP 5a: Remove Orphaned Quota",
            """DELETE FROM userquota WHERE uid = 'oib@bubuit.net'""",
            "Remove orphaned quota record for deleted user"
        ):
            orphan_success = False

        # Remove orphaned stream record
        if not execute_step(
            session,
            "STEP 5b: Remove Orphaned Stream",
            """DELETE FROM publicstream WHERE uid = 'oib@bubuit.net'""",
            "Remove orphaned stream record for deleted user"
        ):
            orphan_success = False

        if orphan_success:
            success_count += 1

        # Verify Step 5
        execute_step(
            session,
            "VERIFY STEP 5",
            """SELECT 'userquota' as table_name, COUNT(*) as count
               FROM userquota WHERE uid = 'oib@bubuit.net'
               UNION ALL
               SELECT 'publicstream' as table_name, COUNT(*) as count
               FROM publicstream WHERE uid = 'oib@bubuit.net'""",
            "Verify orphaned records are removed"
        )
        # Final Verification
        print("\n=== FINAL VERIFICATION ===")

        # Check for remaining issues; each branch is parenthesized so that
        # LIMIT applies per branch rather than (invalidly) to the whole UNION
        execute_step(
            session,
            "FINAL CHECK",
            """(SELECT 'ISSUE: User email/username mismatch' as issue
                FROM "user"
                WHERE email != username)
               UNION ALL
               (SELECT 'ISSUE: Expired active sessions'
                FROM dbsession
                WHERE expires_at < NOW() AND is_active = true
                LIMIT 1)
               UNION ALL
               (SELECT 'ISSUE: PublicStream UID/username mismatch'
                FROM publicstream
                WHERE uid != username
                LIMIT 1)
               UNION ALL
               (SELECT 'ISSUE: Orphaned quota records'
                FROM userquota q
                LEFT JOIN "user" u ON q.uid = u.email
                WHERE u.email IS NULL
                LIMIT 1)
               UNION ALL
               (SELECT 'ISSUE: Orphaned stream records'
                FROM publicstream p
                LEFT JOIN "user" u ON p.uid = u.email
                WHERE u.email IS NULL
                LIMIT 1)""",
            "Check for any remaining legacy issues"
        )

        # Summary
        print("\n=== CLEANUP SUMMARY ===")
        print(f"Total steps: {total_steps}")
        print(f"Successful steps: {success_count}")
        print(f"Failed steps: {total_steps - success_count}")

        if success_count == total_steps:
            print("✅ All legacy database issues have been fixed!")
        else:
            print("⚠️ Some issues remain. Check the output above for details.")

        return 0 if success_count == total_steps else 1

if __name__ == "__main__":
    sys.exit(main())
@@ -1,174 +0,0 @@
#!/usr/bin/env python3
"""
Fix Database Constraints and Legacy Data
Handles foreign key constraints properly during cleanup
"""

import sys
from sqlmodel import Session, text
from database import engine

def execute_query(session, query, description):
    """Execute a query and report results"""
    print(f"\n{description}")
    print(f"Query: {query}")

    try:
        result = session.exec(text(query))
        if query.strip().upper().startswith('SELECT'):
            rows = result.fetchall()
            print(f"Result: {len(rows)} rows")
            for row in rows:
                print(f"  {row}")
        else:
            session.commit()
            print(f"✅ Success: {result.rowcount} rows affected")
        return True
    except Exception as e:
        print(f"❌ Error: {e}")
        session.rollback()
        return False

def main():
    """Fix database constraints and legacy data"""
    print("=== FIXING DATABASE CONSTRAINTS AND LEGACY DATA ===")

    with Session(engine) as session:

        # Step 1: First, temporarily drop the foreign key constraint
        print("\n=== STEP 1: Handle Foreign Key Constraint ===")

        # Check current constraint
        execute_query(
            session,
            """SELECT conname, conrelid::regclass, confrelid::regclass
               FROM pg_constraint
               WHERE conname = 'dbsession_user_id_fkey'""",
            "Check existing foreign key constraint"
        )

        # Drop the constraint temporarily
        execute_query(
            session,
            """ALTER TABLE dbsession DROP CONSTRAINT IF EXISTS dbsession_user_id_fkey""",
            "Drop foreign key constraint temporarily"
        )

        # Step 2: Update user table
        print("\n=== STEP 2: Update User Table ===")
        execute_query(
            session,
            """UPDATE "user"
               SET username = email,
                   display_name = CASE
                       WHEN display_name = '' OR display_name IS NULL
                       THEN split_part(email, '@', 1)
                       ELSE display_name
                   END
               WHERE email = 'oib@chello.at'""",
            "Update user username to match email"
        )

        # Verify user update
        execute_query(
            session,
            """SELECT email, username, display_name FROM "user" WHERE email = 'oib@chello.at'""",
            "Verify user table update"
        )

        # Step 3: Update session user_id references
        print("\n=== STEP 3: Update Session References ===")
        execute_query(
            session,
            """UPDATE dbsession
               SET user_id = 'oib@chello.at'
               WHERE user_id = 'oibchello'""",
            "Update session user_id to email format"
        )

        # Verify session updates
        execute_query(
            session,
            """SELECT DISTINCT user_id FROM dbsession""",
            "Verify session user_id updates"
        )

        # Step 4: Recreate the foreign key constraint
        print("\n=== STEP 4: Recreate Foreign Key Constraint ===")
        execute_query(
            session,
            """ALTER TABLE dbsession
               ADD CONSTRAINT dbsession_user_id_fkey
               FOREIGN KEY (user_id) REFERENCES "user"(username)""",
            "Recreate foreign key constraint"
        )

        # Step 5: Final verification - check for remaining issues
        print("\n=== STEP 5: Final Verification ===")

        # Check user email/username match (the closing quote on "user" is
        # escaped so the quoted identifier survives the triple-quoted string)
        execute_query(
            session,
            """SELECT email, username,
                   CASE WHEN email = username THEN '✓ Match' ELSE '✗ Mismatch' END as status
               FROM "user\"""",
            "Check user email/username consistency"
        )

        # Check expired sessions
        execute_query(
            session,
            """SELECT COUNT(*) as expired_active_sessions
               FROM dbsession
               WHERE expires_at < NOW() AND is_active = true""",
            "Check for expired active sessions"
        )

        # Check PublicStream consistency
        execute_query(
            session,
            """SELECT uid, username,
                   CASE WHEN uid = username THEN '✓ Match' ELSE '✗ Mismatch' END as status
               FROM publicstream""",
            "Check PublicStream UID/username consistency"
        )

        # Check for orphaned records
        execute_query(
            session,
            """SELECT 'userquota' as table_name, COUNT(*) as orphaned_records
               FROM userquota q
               LEFT JOIN "user" u ON q.uid = u.email
               WHERE u.email IS NULL
               UNION ALL
               SELECT 'publicstream' as table_name, COUNT(*) as orphaned_records
               FROM publicstream p
               LEFT JOIN "user" u ON p.uid = u.email
               WHERE u.email IS NULL""",
            "Check for orphaned records"
        )

        # Summary of current state
        print("\n=== DATABASE STATE SUMMARY ===")
        execute_query(
            session,
            """SELECT
                   COUNT(DISTINCT u.email) as total_users,
                   COUNT(DISTINCT q.uid) as quota_records,
                   COUNT(DISTINCT p.uid) as stream_records,
                   COUNT(CASE WHEN s.is_active THEN 1 END) as active_sessions,
                   COUNT(CASE WHEN s.expires_at < NOW() AND s.is_active THEN 1 END) as expired_active_sessions
               FROM "user" u
               FULL OUTER JOIN userquota q ON u.email = q.uid
               FULL OUTER JOIN publicstream p ON u.email = p.uid
               FULL OUTER JOIN dbsession s ON u.username = s.user_id""",
            "Database state summary"
        )

        print("\n✅ Database cleanup completed!")
        print("All legacy data issues should now be resolved.")

        return 0

if __name__ == "__main__":
    sys.exit(main())
@@ -1,13 +0,0 @@
-- Migration script to update DBSession foreign key to reference user.email
-- Run this when no active sessions exist to avoid deadlocks

BEGIN;

-- Step 1: Drop the existing foreign key constraint if it exists
ALTER TABLE dbsession DROP CONSTRAINT IF EXISTS dbsession_user_id_fkey;

-- Step 2: Add the new foreign key constraint referencing user.email
ALTER TABLE dbsession ADD CONSTRAINT dbsession_uid_fkey
    FOREIGN KEY (uid) REFERENCES "user"(email);

COMMIT;
@@ -1,4 +1,4 @@
-bind = "0.0.0.0:8000"
+bind = "0.0.0.0:8100"
 workers = 2  # Tune based on available CPU cores
 worker_class = "uvicorn.workers.UvicornWorker"
 timeout = 300  # Increased from 60 to 300 seconds (5 minutes)
@@ -1,13 +0,0 @@
-- Migration script to update DBSession foreign key to reference user.email
-- Run this when no active sessions exist to avoid deadlocks

BEGIN;

-- Step 1: Drop the existing foreign key constraint
ALTER TABLE dbsession DROP CONSTRAINT IF EXISTS dbsession_user_id_fkey;

-- Step 2: Add the new foreign key constraint referencing user.email
ALTER TABLE dbsession ADD CONSTRAINT dbsession_user_id_fkey
    FOREIGN KEY (user_id) REFERENCES "user"(email);

COMMIT;
@@ -1,168 +0,0 @@
#!/usr/bin/env python3
"""
UID Migration Script - Complete migration from username-based to email-based UIDs

This script completes the UID migration by updating remaining username-based UIDs
in the database to use proper email format.

Based on previous migration history:
- devuser -> oib@bubuit.net (as per migration memory)
- oibchello -> oib@chello.at (already completed)
"""

import psycopg2
import sys
from datetime import datetime

# Database connection string
DATABASE_URL = "postgresql://d2s:kuTy4ZKs2VcjgDh6@localhost:5432/dictastream"

def log_message(message):
    """Log message with timestamp"""
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print(f"[{timestamp}] {message}")

def check_current_state(cursor):
    """Check current state of UID migration"""
    log_message("Checking current UID state...")

    # Check publicstream table
    cursor.execute("SELECT uid, username FROM publicstream WHERE uid NOT LIKE '%@%'")
    non_email_uids = cursor.fetchall()

    if non_email_uids:
        log_message(f"Found {len(non_email_uids)} non-email UIDs in publicstream:")
        for uid, username in non_email_uids:
            log_message(f"  - UID: {uid}, Username: {username}")
    else:
        log_message("All UIDs in publicstream are already in email format")

    # Check userquota table
    cursor.execute("SELECT uid FROM userquota WHERE uid NOT LIKE '%@%'")
    quota_non_email_uids = cursor.fetchall()

    if quota_non_email_uids:
        log_message(f"Found {len(quota_non_email_uids)} non-email UIDs in userquota:")
        for (uid,) in quota_non_email_uids:
            log_message(f"  - UID: {uid}")
    else:
        log_message("All UIDs in userquota are already in email format")

    return non_email_uids, quota_non_email_uids

def migrate_uids(cursor):
    """Migrate remaining username-based UIDs to email format"""
    log_message("Starting UID migration...")

    # Migration mapping based on previous migration history
    uid_mapping = {
        'devuser': 'oib@bubuit.net'
    }

    migration_count = 0

    for old_uid, new_uid in uid_mapping.items():
        log_message(f"Migrating UID: {old_uid} -> {new_uid}")

        # Update publicstream table
        cursor.execute(
            "UPDATE publicstream SET uid = %s WHERE uid = %s",
            (new_uid, old_uid)
        )
        publicstream_updated = cursor.rowcount

        # Update userquota table
        cursor.execute(
            "UPDATE userquota SET uid = %s WHERE uid = %s",
            (new_uid, old_uid)
        )
        userquota_updated = cursor.rowcount

        # Update uploadlog table (if any records exist)
        cursor.execute(
            "UPDATE uploadlog SET uid = %s WHERE uid = %s",
            (new_uid, old_uid)
        )
        uploadlog_updated = cursor.rowcount

        log_message(f"  - Updated {publicstream_updated} records in publicstream")
        log_message(f"  - Updated {userquota_updated} records in userquota")
        log_message(f"  - Updated {uploadlog_updated} records in uploadlog")

        migration_count += publicstream_updated + userquota_updated + uploadlog_updated

    return migration_count

def verify_migration(cursor):
    """Verify migration was successful"""
    log_message("Verifying migration...")

    # Check for any remaining non-email UIDs
    cursor.execute("""
        SELECT 'publicstream' as table_name, uid FROM publicstream WHERE uid NOT LIKE '%@%'
        UNION ALL
        SELECT 'userquota' as table_name, uid FROM userquota WHERE uid NOT LIKE '%@%'
        UNION ALL
        SELECT 'uploadlog' as table_name, uid FROM uploadlog WHERE uid NOT LIKE '%@%'
    """)

    remaining_non_email = cursor.fetchall()

    if remaining_non_email:
        log_message("WARNING: Found remaining non-email UIDs:")
        for table_name, uid in remaining_non_email:
            log_message(f"  - {table_name}: {uid}")
        return False
    else:
        log_message("SUCCESS: All UIDs are now in email format")
        return True

def main():
    """Main migration function"""
    log_message("Starting UID migration script")

    # Initialize handles up front so the except/finally blocks never hit an
    # unbound name when psycopg2.connect() itself fails
    conn = None
    cursor = None

    try:
        # Connect to database
        log_message("Connecting to database...")
        conn = psycopg2.connect(DATABASE_URL)
        cursor = conn.cursor()

        # Check current state
        non_email_uids, quota_non_email_uids = check_current_state(cursor)

        if not non_email_uids and not quota_non_email_uids:
            log_message("No migration needed - all UIDs are already in email format")
            return

        # Perform migration
        migration_count = migrate_uids(cursor)

        # Commit changes
        conn.commit()
        log_message(f"Migration committed - {migration_count} records updated")

        # Verify migration
        if verify_migration(cursor):
            log_message("UID migration completed successfully!")
        else:
            log_message("UID migration completed with warnings - manual review needed")

    except psycopg2.Error as e:
        log_message(f"Database error: {e}")
        if conn:
            conn.rollback()
        sys.exit(1)
    except Exception as e:
        log_message(f"Unexpected error: {e}")
        if conn:
            conn.rollback()
        sys.exit(1)
    finally:
        if cursor:
            cursor.close()
        if conn:
            conn.close()
            log_message("Database connection closed")

if __name__ == "__main__":
    main()
@@ -12,3 +12,5 @@ uvicorn==0.34.2
 uvloop==0.21.0
 watchfiles==1.0.5
 websockets==15.0.1
+alembic
+gunicorn
@@ -4,6 +4,8 @@
 #
 # pip-compile requirements.in
 #
+alembic==1.16.4
+    # via -r requirements.in
 annotated-types==0.6.0
     # via pydantic
 anyio==4.2.0
@@ -18,6 +20,8 @@ fastapi==0.115.12
     # via -r requirements.in
 greenlet==3.2.1
     # via sqlalchemy
+gunicorn==23.0.0
+    # via -r requirements.in
 h11==0.14.0
     # via uvicorn
 httptools==0.6.4
@@ -26,8 +30,14 @@ idna==3.4
     # via anyio
 limits==3.2.0
     # via slowapi
+mako==1.3.10
+    # via alembic
+markupsafe==3.0.2
+    # via mako
 packaging==23.0
-    # via limits
+    # via
+    #   gunicorn
+    #   limits
 psycopg2-binary==2.9.10
     # via -r requirements.in
 pydantic==2.6.0
@@ -47,13 +57,16 @@ slowapi==0.1.9
 sniffio==1.3.0
     # via anyio
 sqlalchemy==2.0.40
-    # via sqlmodel
+    # via
+    #   alembic
+    #   sqlmodel
 sqlmodel==0.0.24
     # via -r requirements.in
 starlette==0.46.1
     # via fastapi
 typing-extensions==4.13.2
     # via
+    #   alembic
     #   fastapi
     #   limits
     #   pydantic
@@ -1,107 +0,0 @@
#!/usr/bin/env python3
"""
Simple Database Cleanup Script
Uses the provided connection string to fix legacy data issues
"""

import psycopg2
import sys

# Database connection string provided by user
DATABASE_URL = "postgresql://d2s:kuTy4ZKs2VcjgDh6@localhost:5432/dictastream"

def execute_query(conn, query, description):
    """Execute a query and report results"""
    print(f"\n{description}")
    print(f"Query: {query}")
    print("[DEBUG] Starting query execution...")

    try:
        print("[DEBUG] Creating cursor...")
        with conn.cursor() as cur:
            print("[DEBUG] Executing query...")
            cur.execute(query)
            print("[DEBUG] Query executed successfully")

            if query.strip().upper().startswith('SELECT'):
                print("[DEBUG] Fetching results...")
                rows = cur.fetchall()
                print(f"Result: {len(rows)} rows")
                for row in rows:
                    print(f"  {row}")
            else:
                print("[DEBUG] Committing transaction...")
                conn.commit()
                print(f"✅ Success: {cur.rowcount} rows affected")
        print("[DEBUG] Query completed successfully")
        return True
    except Exception as e:
        print(f"❌ Error: {e}")
        print(f"[DEBUG] Error type: {type(e).__name__}")
        print("[DEBUG] Rolling back transaction...")
        conn.rollback()
        return False

def main():
    """Execute database cleanup step by step"""
    print("=== DATABASE LEGACY DATA CLEANUP ===")
    print(f"Attempting to connect to: {DATABASE_URL}")

    try:
        print("[DEBUG] Creating database connection...")
        conn = psycopg2.connect(DATABASE_URL)
        print("✅ Connected to database successfully")
        print(f"[DEBUG] Connection status: {conn.status}")
        print(f"[DEBUG] Database info: {conn.get_dsn_parameters()}")

        # Step 1: Check current state
        print("\n=== STEP 1: Check Current State ===")
        execute_query(conn, 'SELECT email, username, display_name FROM "user"', "Check user table")
        execute_query(conn, 'SELECT COUNT(*) as expired_active FROM dbsession WHERE expires_at < NOW() AND is_active = true', "Check expired sessions")

        # Step 2: Mark expired sessions as inactive (this was successful before)
        print("\n=== STEP 2: Fix Expired Sessions ===")
        execute_query(conn, 'UPDATE dbsession SET is_active = false WHERE expires_at < NOW() AND is_active = true', "Mark expired sessions inactive")

        # Step 3: Handle foreign key constraint by dropping it temporarily
        print("\n=== STEP 3: Handle Foreign Key Constraint ===")
        execute_query(conn, 'ALTER TABLE dbsession DROP CONSTRAINT IF EXISTS dbsession_user_id_fkey', "Drop foreign key constraint")

        # Step 4: Update user table
        print("\n=== STEP 4: Update User Table ===")
        execute_query(conn, """UPDATE "user"
            SET username = email,
                display_name = CASE
                    WHEN display_name = '' OR display_name IS NULL
                    THEN split_part(email, '@', 1)
                    ELSE display_name
                END
            WHERE email = 'oib@chello.at'""", "Update user username to email")

        # Step 5: Update session references
        print("\n=== STEP 5: Update Session References ===")
        execute_query(conn, "UPDATE dbsession SET user_id = 'oib@chello.at' WHERE user_id = 'oibchello'", "Update session user_id")

        # Step 6: Recreate foreign key constraint
        print("\n=== STEP 6: Recreate Foreign Key ===")
        execute_query(conn, 'ALTER TABLE dbsession ADD CONSTRAINT dbsession_user_id_fkey FOREIGN KEY (user_id) REFERENCES "user"(username)', "Recreate foreign key")

        # Step 7: Final verification
        print("\n=== STEP 7: Final Verification ===")
        execute_query(conn, 'SELECT email, username, display_name FROM "user"', "Verify user table")
        execute_query(conn, 'SELECT DISTINCT user_id FROM dbsession', "Verify session user_id")
        execute_query(conn, 'SELECT uid, username FROM publicstream', "Check publicstream")

        print("\n✅ Database cleanup completed successfully!")

    except Exception as e:
        print(f"❌ Database connection error: {e}")
        return 1
    finally:
        if 'conn' in locals():
            conn.close()

    return 0

if __name__ == "__main__":
    sys.exit(main())
@@ -97,7 +97,7 @@ def upload(request: Request, uid: str = Form(...), file: UploadFile = Form(...))
         ip=request.client.host,
         filename=file.filename,  # original filename from user
         processed_filename=None,  # not yet processed
-        size_bytes=None  # not yet known
+        size_bytes=0  # placeholder to satisfy NOT NULL; updated after processing
     )
     db.add(early_log)
     log_violation("UPLOAD_DEBUG", request.client.host, uid, f"[FORCE FLUSH] Before db.flush() after early_log add")