Add factory reset endpoint for stage environment cleanup (#54)

Mondo Diaz
2026-01-21 16:00:02 -06:00
parent c9026e1950
commit 535280a783
4 changed files with 206 additions and 0 deletions

View File

@@ -6390,3 +6390,110 @@ def get_artifact_provenance(
        tags=tag_list,
        uploads=upload_history,
    )
# =============================================================================
# Factory Reset Endpoint (Admin Only)
# =============================================================================
@router.post("/api/v1/admin/factory-reset", tags=["admin"])
def factory_reset(
    request: Request,
    db: Session = Depends(get_db),
    storage: S3Storage = Depends(get_storage),
    current_user: User = Depends(require_admin),
):
    """
    Factory reset - delete all data and restore the service to its initial state.

    This endpoint:
    1. Drops all database tables
    2. Deletes all objects from S3 storage
    3. Recreates the database schema
    4. Re-seeds the database with the default admin user

    Requires:
    - Admin authentication
    - X-Confirm-Reset header set to "yes-delete-all-data"

    WARNING: This is a destructive operation that cannot be undone.
    """
    # Require an explicit confirmation header so the reset cannot be
    # triggered by an accidental POST
    confirm_header = request.headers.get("X-Confirm-Reset")
    if confirm_header != "yes-delete-all-data":
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Factory reset requires X-Confirm-Reset header set to 'yes-delete-all-data'",
        )

    logger.warning(f"Factory reset initiated by admin user: {current_user.username}")

    results = {
        "database_tables_dropped": 0,
        "s3_objects_deleted": 0,
        "database_reinitialized": False,
        "seeded": False,
    }

    try:
        # Step 1: Drop all tables in the public schema. Count them first so
        # the response can report a real number.
        logger.info("Dropping all database tables...")
        table_count = db.execute(
            text("SELECT COUNT(*) FROM pg_tables WHERE schemaname = 'public'")
        ).scalar()
        db.execute(
            text("""
                DO $$
                DECLARE
                    r RECORD;
                BEGIN
                    -- Disable FK triggers so drop order does not matter
                    SET session_replication_role = 'replica';
                    FOR r IN (SELECT tablename FROM pg_tables WHERE schemaname = 'public') LOOP
                        EXECUTE 'DROP TABLE IF EXISTS public.' || quote_ident(r.tablename) || ' CASCADE';
                    END LOOP;
                    SET session_replication_role = 'origin';
                END $$;
            """)
        )
        db.commit()

        # Sanity check: no tables should remain in the public schema
        remaining_tables = db.execute(
            text("SELECT COUNT(*) FROM pg_tables WHERE schemaname = 'public'")
        ).scalar()
        results["database_tables_dropped"] = table_count
        logger.info(f"Dropped {table_count} database tables, remaining: {remaining_tables}")

        # Step 2: Delete all S3 objects
        logger.info("Deleting all S3 objects...")
        results["s3_objects_deleted"] = storage.delete_all()

        # Step 3: Reinitialize the database schema
        logger.info("Reinitializing database schema...")
        from .database import init_db

        init_db()
        results["database_reinitialized"] = True

        # Step 4: Re-seed with default data
        logger.info("Seeding database with defaults...")
        from .seed import seed_database

        seed_database()
        results["seeded"] = True

        logger.warning(f"Factory reset completed by {current_user.username}")
        return {
            "status": "success",
            "message": "Factory reset completed successfully",
            "results": results,
        }
    except Exception as e:
        logger.error(f"Factory reset failed: {e}")
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Factory reset failed: {str(e)}",
        )
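For reviewers, a minimal sketch of the confirmation handshake from the client side. The base URL and token are placeholders, and the bearer-auth scheme is an assumption; only the X-Confirm-Reset header and the route path are confirmed by this hunk:

import requests

BASE_URL = "http://stage.example.internal"  # placeholder stage host
ADMIN_TOKEN = "..."  # placeholder credential; bearer auth is an assumption

resp = requests.post(
    f"{BASE_URL}/api/v1/admin/factory-reset",
    headers={
        "Authorization": f"Bearer {ADMIN_TOKEN}",
        # Without this exact header value the endpoint returns 400
        "X-Confirm-Reset": "yes-delete-all-data",
    },
    timeout=300,  # schema drop + S3 purge + re-seed can take a while
)
resp.raise_for_status()
print(resp.json()["results"])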

View File

@@ -835,6 +835,36 @@ class S3Storage:
        except ClientError:
            return False
    def delete_all(self) -> int:
        """
        Delete all objects in the bucket.

        Returns:
            Number of objects deleted
        """
        deleted_count = 0
        try:
            paginator = self.client.get_paginator("list_objects_v2")
            for page in paginator.paginate(Bucket=self.bucket):
                objects = page.get("Contents", [])
                if not objects:
                    continue
                # Each listing page holds at most 1000 keys, which matches the
                # delete_objects batch limit, so one page maps to one delete call
                delete_keys = [{"Key": obj["Key"]} for obj in objects]
                self.client.delete_objects(
                    Bucket=self.bucket, Delete={"Objects": delete_keys}
                )
                deleted_count += len(delete_keys)
                logger.info(f"Deleted {len(delete_keys)} objects from S3")
            logger.info(f"Total objects deleted from S3: {deleted_count}")
            return deleted_count
        except ClientError as e:
            logger.error(f"Failed to delete all S3 objects: {e}")
            raise
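One caveat worth noting: delete_all() only removes current objects. If the stage bucket ever has versioning enabled, old versions and delete markers would survive a reset. A sketch of a variant that also purges versions, using standard boto3 pagination (the method name purge_all_versions is hypothetical, not part of this commit):

    def purge_all_versions(self) -> int:
        """Delete every object version and delete marker in the bucket."""
        deleted_count = 0
        paginator = self.client.get_paginator("list_object_versions")
        for page in paginator.paginate(Bucket=self.bucket):
            # Both concrete versions and delete markers must be removed
            targets = page.get("Versions", []) + page.get("DeleteMarkers", [])
            if not targets:
                continue
            self.client.delete_objects(
                Bucket=self.bucket,
                Delete={
                    "Objects": [
                        {"Key": t["Key"], "VersionId": t["VersionId"]}
                        for t in targets
                    ]
                },
            )
            deleted_count += len(targets)
        return deleted_count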
    def generate_presigned_url(
        self,
        s3_key: str,