Add S3 verification and failure cleanup tests
- Add test_s3_bucket_single_object_after_duplicates to verify only one S3 object exists
- Add tests for upload failure scenarios (invalid project/package, empty file)
- Add tests for cleanup of orphaned S3 objects and database records
- Add S3 direct-access helpers (list_s3_objects_by_hash, s3_object_exists, etc.)
- Fix conftest.py to use setdefault for env vars (don't override container config)

All 52 tests now pass.
This commit is contained in:
@@ -14,16 +14,17 @@ from typing import Generator, BinaryIO
|
||||
from unittest.mock import MagicMock, patch
|
||||
import io
|
||||
|
||||
# Set test environment defaults before importing app modules.
# Use setdefault so env vars already present (e.g. injected by docker-compose)
# are NOT overridden; plain `os.environ[...] = ...` assignments here would
# clobber the container configuration and point tests at the wrong services.
os.environ.setdefault("ORCHARD_DATABASE_HOST", "localhost")
os.environ.setdefault("ORCHARD_DATABASE_PORT", "5432")
os.environ.setdefault("ORCHARD_DATABASE_USER", "test")
os.environ.setdefault("ORCHARD_DATABASE_PASSWORD", "test")
os.environ.setdefault("ORCHARD_DATABASE_DBNAME", "orchard_test")
os.environ.setdefault("ORCHARD_S3_ENDPOINT", "http://localhost:9000")
os.environ.setdefault("ORCHARD_S3_BUCKET", "test-bucket")
os.environ.setdefault("ORCHARD_S3_ACCESS_KEY_ID", "test")
os.environ.setdefault("ORCHARD_S3_SECRET_ACCESS_KEY", "test")
|
||||
|
||||
# =============================================================================
|
||||
@@ -315,3 +316,99 @@ def upload_test_file(
|
||||
)
|
||||
assert response.status_code == 200, f"Upload failed: {response.text}"
|
||||
return response.json()
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# S3 Direct Access Helpers (for integration tests)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def get_s3_client():
    """
    Build a boto3 S3 client for direct S3 access in integration tests.

    Configuration comes from the same environment variables the app uses.
    Note: when running inside a container the S3 endpoint should be
    'minio:9000' rather than 'localhost:9000'.
    """
    import boto3
    from botocore.config import Config

    env = os.environ.get
    # Same endpoint as the app (minio:9000 in container, localhost:9000 locally).
    return boto3.client(
        "s3",
        endpoint_url=env("ORCHARD_S3_ENDPOINT", "http://minio:9000"),
        region_name=env("ORCHARD_S3_REGION", "us-east-1"),
        aws_access_key_id=env("ORCHARD_S3_ACCESS_KEY_ID", "minioadmin"),
        aws_secret_access_key=env("ORCHARD_S3_SECRET_ACCESS_KEY", "minioadmin"),
        # MinIO requires path-style addressing (bucket in the path, not the host).
        config=Config(s3={"addressing_style": "path"}),
    )
|
||||
|
||||
|
||||
def get_s3_bucket():
    """Return the S3 bucket name from the environment, with a default."""
    bucket = os.environ.get("ORCHARD_S3_BUCKET")
    return bucket if bucket is not None else "orchard-artifacts"
|
||||
|
||||
|
||||
def list_s3_objects_by_hash(sha256_hash: str) -> list:
    """
    List S3 object keys that match a specific SHA256 hash.

    Keys follow the fruits/{hash[:2]}/{hash[2:4]}/{hash} layout.
    Returns the matching keys; empty list when nothing matches.
    """
    prefix = "fruits/{0}/{1}/{2}".format(
        sha256_hash[:2], sha256_hash[2:4], sha256_hash
    )
    listing = get_s3_client().list_objects_v2(
        Bucket=get_s3_bucket(), Prefix=prefix
    )
    # "Contents" is absent (not empty) when no object matched the prefix.
    return [entry["Key"] for entry in listing.get("Contents", [])]
|
||||
|
||||
|
||||
def count_s3_objects_by_prefix(prefix: str) -> int:
    """
    Count S3 objects with a given prefix.

    Useful for checking if duplicate uploads created multiple objects.

    Uses a paginator: a single ListObjectsV2 call returns at most 1000 keys,
    so a one-shot `len(response["Contents"])` silently caps the count at 1000.
    """
    client = get_s3_client()
    bucket = get_s3_bucket()

    total = 0
    paginator = client.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=bucket, Prefix=prefix):
        # "Contents" is absent (not empty) on pages with no matching objects.
        total += len(page.get("Contents", []))
    return total
|
||||
|
||||
|
||||
def s3_object_exists(sha256_hash: str) -> bool:
    """Return True when an S3 object exists for the given SHA256 hash."""
    return bool(list_s3_objects_by_hash(sha256_hash))
|
||||
|
||||
|
||||
def delete_s3_object_by_hash(sha256_hash: str) -> bool:
    """
    Best-effort deletion of an S3 object by its SHA256 hash (test cleanup).

    Returns True on success, False if the delete raised for any reason.
    """
    key = "fruits/{0}/{1}/{2}".format(
        sha256_hash[:2], sha256_hash[2:4], sha256_hash
    )
    try:
        get_s3_client().delete_object(Bucket=get_s3_bucket(), Key=key)
    except Exception:
        # Deliberately broad: cleanup must never fail a test run.
        return False
    return True
|
||||
|
||||
Reference in New Issue
Block a user