Add storage abstraction, stats endpoints, garbage collection, and test infrastructure
- Add StorageBackend protocol for backend-agnostic storage interface - Add health check with storage and database connectivity verification - Add garbage collection endpoints for orphaned artifacts (ref_count=0) - Add deduplication statistics endpoints (/api/v1/stats, /stats/storage, /stats/deduplication) - Add per-project statistics endpoint - Add verify_integrity method for post-upload hash validation - Set up pytest infrastructure with mock S3 client - Add unit tests for hash calculation and duplicate detection
This commit is contained in:
@@ -73,6 +73,11 @@ from .schemas import (
|
||||
SearchResultPackage,
|
||||
SearchResultArtifact,
|
||||
PresignedUrlResponse,
|
||||
GarbageCollectionResponse,
|
||||
OrphanedArtifactResponse,
|
||||
StorageStatsResponse,
|
||||
DeduplicationStatsResponse,
|
||||
ProjectStatsResponse,
|
||||
)
|
||||
from .metadata import extract_metadata
|
||||
from .config import get_settings
|
||||
@@ -246,8 +251,39 @@ def _log_audit(
|
||||
|
||||
# Health check
@router.get("/health", response_model=HealthResponse)
def health_check(
    db: Session = Depends(get_db),
    storage: S3Storage = Depends(get_storage),
):
    """
    Health check endpoint with storage and database connectivity verification.

    Returns status "ok" when both the database and the object store are
    reachable, otherwise "degraded".  Per-component health is included in the
    response so operators can tell which dependency failed.
    """
    # SQLAlchemy 2.x rejects raw SQL strings; textual SQL must go through text().
    from sqlalchemy import text

    # Database connectivity: a trivial round-trip query.
    try:
        db.execute(text("SELECT 1"))
        database_healthy = True
    except Exception as e:
        logger.warning(f"Database health check failed: {e}")
        database_healthy = False

    # Storage connectivity: delegate to the backend's own lightweight probe
    # (HEAD on the bucket) instead of duplicating the boto3 call here.
    try:
        storage_healthy = storage.health_check()
    except Exception as e:
        logger.warning(f"Storage health check failed: {e}")
        storage_healthy = False

    overall_status = "ok" if (storage_healthy and database_healthy) else "degraded"

    return HealthResponse(
        status=overall_status,
        storage_healthy=storage_healthy,
        database_healthy=database_healthy,
    )
|
||||
|
||||
|
||||
# Global search
|
||||
@@ -2104,3 +2140,365 @@ def get_artifact(artifact_id: str, db: Session = Depends(get_db)):
|
||||
format_metadata=artifact.format_metadata,
|
||||
tags=tag_infos,
|
||||
)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Garbage Collection Endpoints (ISSUE 36)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
@router.get(
    "/api/v1/admin/orphaned-artifacts",
    response_model=List[OrphanedArtifactResponse],
)
def list_orphaned_artifacts(
    request: Request,
    limit: int = Query(
        default=100, ge=1, le=1000, description="Max artifacts to return"
    ),
    db: Session = Depends(get_db),
):
    """
    List artifacts with ref_count=0 (orphaned artifacts not referenced by any tag).

    These artifacts can be safely cleaned up as they are not referenced by any tag.
    """
    # Oldest orphans first so repeated calls walk through them deterministically.
    query = (
        db.query(Artifact)
        .filter(Artifact.ref_count == 0)
        .order_by(Artifact.created_at.asc())
        .limit(limit)
    )

    responses = []
    for artifact in query.all():
        responses.append(
            OrphanedArtifactResponse(
                id=artifact.id,
                size=artifact.size,
                created_at=artifact.created_at,
                created_by=artifact.created_by,
                original_name=artifact.original_name,
            )
        )
    return responses
|
||||
|
||||
|
||||
@router.post(
    "/api/v1/admin/garbage-collect",
    response_model=GarbageCollectionResponse,
)
def garbage_collect(
    request: Request,
    dry_run: bool = Query(
        default=True, description="If true, only report what would be deleted"
    ),
    limit: int = Query(
        default=100, ge=1, le=1000, description="Max artifacts to delete per run"
    ),
    db: Session = Depends(get_db),
    storage: S3Storage = Depends(get_storage),
):
    """
    Clean up orphaned artifacts (ref_count=0) from storage and database.

    By default runs in dry-run mode (only reports what would be deleted).
    Set dry_run=false to actually delete artifacts.

    Returns list of deleted artifact IDs and total bytes freed.
    """
    user_id = get_user_id(request)

    # Oldest orphans first; `limit` bounds the work done per invocation.
    candidates = (
        db.query(Artifact)
        .filter(Artifact.ref_count == 0)
        .order_by(Artifact.created_at.asc())
        .limit(limit)
        .all()
    )

    deleted_ids = []
    bytes_freed = 0

    for candidate in candidates:
        if not dry_run:
            # Remove the S3 object first; if that fails, keep the DB row so
            # the artifact is retried on a later run instead of being leaked.
            try:
                storage.delete(candidate.s3_key)
            except Exception as e:
                logger.error(f"Failed to delete S3 object {candidate.s3_key}: {e}")
                continue

            db.delete(candidate)
            logger.info(
                f"Garbage collected artifact {candidate.id[:12]}... ({candidate.size} bytes)"
            )

        deleted_ids.append(candidate.id)
        bytes_freed += candidate.size

    if not dry_run:
        # Record the destructive operation in the audit trail, then commit
        # both the deletions and the audit row atomically.
        _log_audit(
            db,
            action="garbage_collect",
            resource="artifacts",
            user_id=user_id,
            source_ip=request.client.host if request.client else None,
            details={
                "artifacts_deleted": len(deleted_ids),
                "bytes_freed": bytes_freed,
                "artifact_ids": deleted_ids[:10],  # Log first 10 for brevity
            },
        )
        db.commit()

    return GarbageCollectionResponse(
        artifacts_deleted=len(deleted_ids),
        bytes_freed=bytes_freed,
        artifact_ids=deleted_ids,
        dry_run=dry_run,
    )
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Statistics Endpoints (ISSUE 34)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
@router.get("/api/v1/stats", response_model=StorageStatsResponse)
def get_storage_stats(db: Session = Depends(get_db)):
    """
    Get global storage statistics including deduplication metrics.
    """
    # Totals over every artifact row, referenced or not.
    count_all, size_all = db.query(
        func.count(Artifact.id),
        func.coalesce(func.sum(Artifact.size), 0),
    ).first()
    total_artifacts = count_all or 0
    total_size_bytes = size_all or 0

    # Artifacts still referenced by at least one tag.
    unique_artifacts = (
        db.query(func.count(Artifact.id)).filter(Artifact.ref_count > 0).scalar() or 0
    )

    # Orphans (ref_count == 0) eligible for garbage collection.
    orphan_row = (
        db.query(
            func.count(Artifact.id),
            func.coalesce(func.sum(Artifact.size), 0),
        )
        .filter(Artifact.ref_count == 0)
        .first()
    )
    orphaned_artifacts = orphan_row[0] or 0
    orphaned_size_bytes = orphan_row[1] or 0

    # Upload totals, with a filtered aggregate for deduplicated hits.
    upload_row = db.query(
        func.count(Upload.id),
        func.count(Upload.id).filter(Upload.deduplicated == True),
    ).first()
    total_uploads = upload_row[0] or 0
    deduplicated_uploads = upload_row[1] or 0

    # Ratio > 1 means several uploads were served by a single stored artifact.
    deduplication_ratio = (
        total_uploads / unique_artifacts if unique_artifacts > 0 else 0.0
    )

    # Bytes we avoided storing: every reference beyond the first would have
    # been a full copy without content-addressed deduplication.
    storage_saved_bytes = (
        db.query(func.coalesce(func.sum(Artifact.size * (Artifact.ref_count - 1)), 0))
        .filter(Artifact.ref_count > 1)
        .scalar()
        or 0
    )

    return StorageStatsResponse(
        total_artifacts=total_artifacts,
        total_size_bytes=total_size_bytes,
        unique_artifacts=unique_artifacts,
        orphaned_artifacts=orphaned_artifacts,
        orphaned_size_bytes=orphaned_size_bytes,
        total_uploads=total_uploads,
        deduplicated_uploads=deduplicated_uploads,
        deduplication_ratio=deduplication_ratio,
        storage_saved_bytes=storage_saved_bytes,
    )
|
||||
|
||||
|
||||
@router.get("/api/v1/stats/storage", response_model=StorageStatsResponse)
def get_storage_stats_alias(db: Session = Depends(get_db)):
    """Alias for /api/v1/stats - get global storage statistics."""
    # Delegate directly so both routes always report identical numbers.
    return get_storage_stats(db)
|
||||
|
||||
|
||||
@router.get("/api/v1/stats/deduplication", response_model=DeduplicationStatsResponse)
def get_deduplication_stats(
    top_n: int = Query(
        default=10,
        ge=1,
        le=100,
        description="Number of top referenced artifacts to return",
    ),
    db: Session = Depends(get_db),
):
    """
    Get detailed deduplication effectiveness statistics.
    """
    # Logical bytes: what storage would cost if every upload kept its own
    # copy, i.e. sum(size * ref_count) over all artifacts.
    total_logical_bytes = (
        db.query(func.coalesce(func.sum(Artifact.size * Artifact.ref_count), 0)).scalar()
        or 0
    )

    # Physical bytes: what is actually stored (referenced artifacts only).
    total_physical_bytes = (
        db.query(func.coalesce(func.sum(Artifact.size), 0))
        .filter(Artifact.ref_count > 0)
        .scalar()
        or 0
    )

    bytes_saved = total_logical_bytes - total_physical_bytes
    savings_percentage = (
        (bytes_saved / total_logical_bytes * 100) if total_logical_bytes > 0 else 0.0
    )

    # Upload counts.
    total_uploads = db.query(func.count(Upload.id)).scalar() or 0
    unique_artifacts = (
        db.query(func.count(Artifact.id)).filter(Artifact.ref_count > 0).scalar() or 0
    )
    duplicate_uploads = max(total_uploads - unique_artifacts, 0)

    # Reference-count distribution over live artifacts.
    avg_refs, max_refs = (
        db.query(
            func.coalesce(func.avg(Artifact.ref_count), 0),
            func.coalesce(func.max(Artifact.ref_count), 0),
        )
        .filter(Artifact.ref_count > 0)
        .first()
    )
    average_ref_count = float(avg_refs or 0)
    max_ref_count = max_refs or 0

    # Top N artifacts with the most duplicate references.
    most_referenced = [
        {
            "id": artifact.id,
            "ref_count": artifact.ref_count,
            "size": artifact.size,
            "storage_saved": artifact.size * (artifact.ref_count - 1),
            "original_name": artifact.original_name,
        }
        for artifact in (
            db.query(Artifact)
            .filter(Artifact.ref_count > 1)
            .order_by(Artifact.ref_count.desc())
            .limit(top_n)
            .all()
        )
    ]

    return DeduplicationStatsResponse(
        total_logical_bytes=total_logical_bytes,
        total_physical_bytes=total_physical_bytes,
        bytes_saved=bytes_saved,
        savings_percentage=savings_percentage,
        total_uploads=total_uploads,
        unique_artifacts=unique_artifacts,
        duplicate_uploads=duplicate_uploads,
        average_ref_count=average_ref_count,
        max_ref_count=max_ref_count,
        most_referenced_artifacts=most_referenced,
    )
|
||||
|
||||
|
||||
@router.get(
    "/api/v1/projects/{project_name}/stats", response_model=ProjectStatsResponse
)
def get_project_stats(
    project_name: str,
    db: Session = Depends(get_db),
):
    """
    Get statistics for a specific project.

    Raises:
        HTTPException: 404 if no project with the given name exists.
    """
    project = db.query(Project).filter(Project.name == project_name).first()
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # Package count
    package_count = (
        db.query(func.count(Package.id))
        .filter(Package.project_id == project.id)
        .scalar()
        or 0
    )

    # All package IDs for this project, reused by the queries below.
    package_ids = (
        db.query(Package.id).filter(Package.project_id == project.id).subquery()
    )

    # Tag count
    tag_count = (
        db.query(func.count(Tag.id)).filter(Tag.package_id.in_(package_ids)).scalar()
        or 0
    )

    # Distinct artifacts referenced by this project's uploads.
    project_artifact_ids = (
        db.query(Upload.artifact_id)
        .filter(Upload.package_id.in_(package_ids))
        .distinct()
        .subquery()
    )

    # BUG FIX: the previous implementation joined uploads to artifacts and
    # summed Artifact.size over upload *rows*, double-counting the size of any
    # artifact uploaded more than once within the project (while the count
    # used DISTINCT).  Aggregate over the distinct artifact set instead so
    # count and size are consistent.
    artifact_stats = (
        db.query(
            func.count(Artifact.id),
            func.coalesce(func.sum(Artifact.size), 0),
        )
        .filter(Artifact.id.in_(project_artifact_ids))
        .first()
    )
    artifact_count = artifact_stats[0] if artifact_stats else 0
    total_size_bytes = artifact_stats[1] if artifact_stats else 0

    # Upload counts (total, and how many were served by deduplication).
    upload_stats = (
        db.query(
            func.count(Upload.id),
            func.count(Upload.id).filter(Upload.deduplicated == True),
        )
        .filter(Upload.package_id.in_(package_ids))
        .first()
    )
    upload_count = upload_stats[0] if upload_stats else 0
    deduplicated_uploads = upload_stats[1] if upload_stats else 0

    return ProjectStatsResponse(
        project_id=str(project.id),
        project_name=project.name,
        package_count=package_count,
        tag_count=tag_count,
        artifact_count=artifact_count,
        total_size_bytes=total_size_bytes,
        upload_count=upload_count,
        deduplicated_uploads=deduplicated_uploads,
    )
|
||||
|
||||
@@ -387,3 +387,72 @@ class PresignedUrlResponse(BaseModel):
|
||||
class HealthResponse(BaseModel):
    """Service health report; component fields are None when not probed."""

    status: str  # "ok" or "degraded"
    version: str = "1.0.0"
    storage_healthy: Optional[bool] = None
    database_healthy: Optional[bool] = None
|
||||
|
||||
|
||||
# Garbage collection schemas
|
||||
class GarbageCollectionResponse(BaseModel):
    """Result of a garbage collection run (real or dry-run)."""

    artifacts_deleted: int  # artifacts removed (or that would be removed)
    bytes_freed: int  # total size of those artifacts
    artifact_ids: List[str]  # IDs of every affected artifact
    dry_run: bool  # True when nothing was actually deleted
|
||||
|
||||
|
||||
class OrphanedArtifactResponse(BaseModel):
    """Information about an orphaned artifact (ref_count == 0)."""

    id: str
    size: int
    created_at: datetime
    created_by: str
    original_name: Optional[str]
|
||||
|
||||
|
||||
# Storage statistics schemas
|
||||
class StorageStatsResponse(BaseModel):
    """Global storage statistics"""

    total_artifacts: int
    total_size_bytes: int
    # Artifacts with ref_count > 0
    unique_artifacts: int
    # Artifacts with ref_count = 0
    orphaned_artifacts: int
    orphaned_size_bytes: int
    total_uploads: int
    deduplicated_uploads: int
    # total_uploads / unique_artifacts (if > 1, deduplication is working)
    deduplication_ratio: float
    # Bytes saved through deduplication
    storage_saved_bytes: int
|
||||
|
||||
|
||||
class DeduplicationStatsResponse(BaseModel):
    """Deduplication effectiveness statistics"""

    # Sum of all upload sizes (what would be stored without dedup)
    total_logical_bytes: int
    # Actual storage used
    total_physical_bytes: int
    bytes_saved: int
    savings_percentage: float
    total_uploads: int
    unique_artifacts: int
    duplicate_uploads: int
    average_ref_count: float
    max_ref_count: int
    # Top N most referenced
    most_referenced_artifacts: List[Dict[str, Any]]
|
||||
|
||||
|
||||
class ProjectStatsResponse(BaseModel):
    """Per-project statistics"""

    project_id: str
    project_name: str
    package_count: int
    tag_count: int
    artifact_count: int
    total_size_bytes: int
    upload_count: int
    deduplicated_uploads: int
|
||||
|
||||
@@ -1,6 +1,17 @@
|
||||
import hashlib
|
||||
import logging
|
||||
from typing import BinaryIO, Tuple, Optional, Dict, Any, Generator, NamedTuple
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import (
|
||||
BinaryIO,
|
||||
Tuple,
|
||||
Optional,
|
||||
Dict,
|
||||
Any,
|
||||
Generator,
|
||||
NamedTuple,
|
||||
Protocol,
|
||||
runtime_checkable,
|
||||
)
|
||||
import boto3
|
||||
from botocore.config import Config
|
||||
from botocore.exceptions import ClientError
|
||||
@@ -10,6 +21,133 @@ from .config import get_settings
|
||||
settings = get_settings()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Storage Backend Protocol/Interface (ISSUE 33)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
@runtime_checkable
class StorageBackend(Protocol):
    """
    Abstract protocol defining the interface for storage backends.

    All storage implementations (S3, MinIO, future backends) must implement
    this interface to ensure consistent behavior across the application.

    Note on Deduplication:
    - This system uses whole-file deduplication based on SHA256 hash
    - Partial/chunk-level deduplication is NOT supported (out of scope for MVP)
    - Files with identical content but different metadata are deduplicated
    """

    def store(
        self, file: BinaryIO, content_length: Optional[int] = None
    ) -> "StorageResult":
        """Store *file* content-addressably and return its StorageResult.

        If an object with the same hash already exists, no upload occurs and
        the existing hash is returned.  *content_length* is a size hint that
        enables multipart upload.  Raises HashComputationError,
        S3ExistenceCheckError, or S3UploadError on failure.
        """
        ...

    def get(self, s3_key: str) -> bytes:
        """Return the full content of the object stored under *s3_key*."""
        ...

    def get_stream(
        self, s3_key: str, range_header: Optional[str] = None
    ) -> Tuple[Any, int, Optional[str]]:
        """Return (stream, content_length, content_range) for *s3_key*.

        *range_header* is an optional HTTP Range value enabling partial
        downloads.
        """
        ...

    def delete(self, s3_key: str) -> bool:
        """Delete the object under *s3_key*; True on success, False otherwise."""
        ...

    def get_object_info(self, s3_key: str) -> Optional[Dict[str, Any]]:
        """Return object metadata (size, content_type, last_modified, etag)
        without downloading content, or None if the object does not exist.
        """
        ...

    def generate_presigned_url(
        self,
        s3_key: str,
        expiry: Optional[int] = None,
        response_content_type: Optional[str] = None,
        response_content_disposition: Optional[str] = None,
    ) -> str:
        """Return a presigned download URL for *s3_key*.

        *expiry* is the URL lifetime in seconds; the response_* arguments
        override the corresponding response headers served with the object.
        """
        ...

    def health_check(self) -> bool:
        """Return True if the storage backend is reachable and healthy."""
        ...
|
||||
|
||||
|
||||
# Threshold for multipart upload (100MB)
|
||||
MULTIPART_THRESHOLD = 100 * 1024 * 1024
|
||||
# Chunk size for multipart upload (10MB)
|
||||
@@ -622,12 +760,68 @@ class S3Storage:
|
||||
)
|
||||
return url
|
||||
|
||||
def health_check(self) -> bool:
    """
    Check if the storage backend is healthy and accessible.

    Performs a lightweight HEAD request on the bucket to verify connectivity.

    Returns:
        True if healthy, False otherwise
    """
    try:
        self.client.head_bucket(Bucket=self.bucket)
    except ClientError as e:
        # Expected failure mode: bucket missing or credentials rejected.
        logger.warning(f"Storage health check failed: {e}")
        return False
    except Exception as e:
        logger.error(f"Unexpected error during storage health check: {e}")
        return False
    return True
|
||||
|
||||
def verify_integrity(self, s3_key: str, expected_sha256: str) -> bool:
    """
    Verify the integrity of a stored object by downloading and re-hashing.

    Expensive (full download and hash) — reserve for critical verification
    scenarios only.

    Args:
        s3_key: The storage key of the file
        expected_sha256: The expected SHA256 hash

    Returns:
        True if hash matches, False otherwise
    """
    try:
        computed = hashlib.sha256(self.get(s3_key)).hexdigest()
    except Exception as e:
        logger.error(f"Error during integrity verification for {s3_key}: {e}")
        return False
    if computed != expected_sha256:
        logger.error(
            f"Integrity verification failed for {s3_key}: "
            f"expected {expected_sha256[:12]}..., got {computed[:12]}..."
        )
        return False
    return True
|
||||
|
||||
|
||||
# Singleton instance
|
||||
_storage = None
|
||||
_storage: Optional[S3Storage] = None
|
||||
|
||||
|
||||
def get_storage() -> S3Storage:
|
||||
def get_storage() -> StorageBackend:
|
||||
"""
|
||||
Get the configured storage backend instance.
|
||||
|
||||
Currently returns S3Storage (works with S3-compatible backends like MinIO).
|
||||
Future implementations may support backend selection via configuration.
|
||||
|
||||
Returns:
|
||||
StorageBackend instance
|
||||
"""
|
||||
global _storage
|
||||
if _storage is None:
|
||||
_storage = S3Storage()
|
||||
|
||||
14
backend/pytest.ini
Normal file
14
backend/pytest.ini
Normal file
@@ -0,0 +1,14 @@
|
||||
[pytest]
|
||||
testpaths = tests
|
||||
python_files = test_*.py
|
||||
python_functions = test_*
|
||||
python_classes = Test*
|
||||
asyncio_mode = auto
|
||||
addopts = -v --tb=short
|
||||
filterwarnings =
|
||||
ignore::DeprecationWarning
|
||||
ignore::UserWarning
|
||||
markers =
|
||||
unit: Unit tests (no external dependencies)
|
||||
integration: Integration tests (require database/storage)
|
||||
slow: Slow tests (skip with -m "not slow")
|
||||
@@ -9,3 +9,10 @@ pydantic==2.5.3
|
||||
pydantic-settings==2.1.0
|
||||
python-jose[cryptography]==3.3.0
|
||||
passlib[bcrypt]==1.7.4
|
||||
|
||||
# Test dependencies
|
||||
pytest>=7.4.0
|
||||
pytest-asyncio>=0.21.0
|
||||
pytest-cov>=4.1.0
|
||||
httpx>=0.25.0
|
||||
moto[s3]>=4.2.0
|
||||
|
||||
1
backend/tests/__init__.py
Normal file
1
backend/tests/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# Test package
|
||||
201
backend/tests/conftest.py
Normal file
201
backend/tests/conftest.py
Normal file
@@ -0,0 +1,201 @@
|
||||
"""
|
||||
Test configuration and fixtures for Orchard backend tests.
|
||||
|
||||
This module provides:
|
||||
- Database fixtures with test isolation
|
||||
- Mock S3 storage using moto
|
||||
- Test data factories for common scenarios
|
||||
"""
|
||||
|
||||
import os
|
||||
import pytest
|
||||
import hashlib
|
||||
from typing import Generator, BinaryIO
|
||||
from unittest.mock import MagicMock, patch
|
||||
import io
|
||||
|
||||
# Set test environment before importing app modules
|
||||
os.environ["ORCHARD_DATABASE_HOST"] = "localhost"
|
||||
os.environ["ORCHARD_DATABASE_PORT"] = "5432"
|
||||
os.environ["ORCHARD_DATABASE_USER"] = "test"
|
||||
os.environ["ORCHARD_DATABASE_PASSWORD"] = "test"
|
||||
os.environ["ORCHARD_DATABASE_DBNAME"] = "orchard_test"
|
||||
os.environ["ORCHARD_S3_ENDPOINT"] = "http://localhost:9000"
|
||||
os.environ["ORCHARD_S3_BUCKET"] = "test-bucket"
|
||||
os.environ["ORCHARD_S3_ACCESS_KEY_ID"] = "test"
|
||||
os.environ["ORCHARD_S3_SECRET_ACCESS_KEY"] = "test"
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Test Data Factories
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def create_test_file(content: Optional[bytes] = None, size: int = 1024) -> io.BytesIO:
    """
    Create a test file with known content.

    Args:
        content: Specific content to use, or None to generate random content
        size: Size of generated content if content is None

    Returns:
        BytesIO object with the content
    """
    # Fix: annotation was `bytes = None`, which mistyped the None default.
    # Compare against None (not truthiness) so an explicit b"" is honoured.
    if content is None:
        content = os.urandom(size)
    return io.BytesIO(content)
|
||||
|
||||
|
||||
def compute_sha256(content: bytes) -> str:
    """Compute SHA256 hash of content as lowercase hex string."""
    hasher = hashlib.sha256()
    hasher.update(content)
    return hasher.hexdigest()
|
||||
|
||||
|
||||
def compute_md5(content: bytes) -> str:
    """Compute MD5 hash of content as lowercase hex string."""
    hasher = hashlib.md5()
    hasher.update(content)
    return hasher.hexdigest()
|
||||
|
||||
|
||||
def compute_sha1(content: bytes) -> str:
    """Compute SHA1 hash of content as lowercase hex string."""
    hasher = hashlib.sha1()
    hasher.update(content)
    return hasher.hexdigest()
|
||||
|
||||
|
||||
# Known test data with pre-computed hashes
|
||||
TEST_CONTENT_HELLO = b"Hello, World!"
|
||||
TEST_HASH_HELLO = "dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f"
|
||||
TEST_MD5_HELLO = "65a8e27d8879283831b664bd8b7f0ad4"
|
||||
TEST_SHA1_HELLO = "0a0a9f2a6772942557ab5355d76af442f8f65e01"
|
||||
|
||||
TEST_CONTENT_EMPTY = b""
|
||||
# Note: Empty content should be rejected by the storage layer
|
||||
|
||||
TEST_CONTENT_BINARY = bytes(range(256))
|
||||
TEST_HASH_BINARY = compute_sha256(TEST_CONTENT_BINARY)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Mock Storage Fixtures
|
||||
# =============================================================================
|
||||
|
||||
|
||||
class MockS3Client:
    """Mock S3 client for unit testing without actual S3/MinIO.

    Stores object bodies in an in-memory dict keyed by S3 key and mimics the
    subset of the boto3 client API the storage layer uses.
    """

    def __init__(self):
        self.objects = {}  # key -> content bytes
        self.bucket = "test-bucket"

    def put_object(self, Bucket: str, Key: str, Body: bytes) -> dict:
        """Store Body under Key; ETag mimics S3's quoted MD5."""
        self.objects[Key] = Body
        return {"ETag": f'"{compute_md5(Body)}"'}

    def get_object(self, Bucket: str, Key: str, **kwargs) -> dict:
        """Return a streaming-style response, or raise NoSuchKey like boto3."""
        if Key not in self.objects:
            from botocore.exceptions import ClientError

            # Fix: raise the same error type boto3 raises (was a bare
            # Exception) so code under test handles missing objects the same
            # way it would against a real client.
            error_response = {"Error": {"Code": "NoSuchKey", "Message": "Not Found"}}
            raise ClientError(error_response, "GetObject")
        content = self.objects[Key]
        return {
            "Body": io.BytesIO(content),
            "ContentLength": len(content),
        }

    def head_object(self, Bucket: str, Key: str) -> dict:
        """Return metadata for Key, or raise a 404 ClientError."""
        if Key not in self.objects:
            from botocore.exceptions import ClientError

            error_response = {"Error": {"Code": "404", "Message": "Not Found"}}
            raise ClientError(error_response, "HeadObject")
        content = self.objects[Key]
        return {
            "ContentLength": len(content),
            "ETag": f'"{compute_md5(content)}"',
        }

    def delete_object(self, Bucket: str, Key: str) -> dict:
        """Delete Key if present; like S3, deleting a missing key is a no-op."""
        self.objects.pop(Key, None)
        return {}

    def head_bucket(self, Bucket: str) -> dict:
        """The bucket always exists in the mock."""
        return {}

    def create_multipart_upload(self, Bucket: str, Key: str) -> dict:
        return {"UploadId": "test-upload-id"}

    def upload_part(
        self, Bucket: str, Key: str, UploadId: str, PartNumber: int, Body: bytes
    ) -> dict:
        return {"ETag": f'"{compute_md5(Body)}"'}

    def complete_multipart_upload(
        self, Bucket: str, Key: str, UploadId: str, MultipartUpload: dict
    ) -> dict:
        return {"ETag": '"test-etag"'}

    def abort_multipart_upload(self, Bucket: str, Key: str, UploadId: str) -> dict:
        return {}

    def generate_presigned_url(
        self, ClientMethod: str, Params: dict, ExpiresIn: int
    ) -> str:
        return f"https://test-bucket.s3.amazonaws.com/{Params['Key']}?presigned=true"
|
||||
|
||||
|
||||
@pytest.fixture
def mock_s3_client() -> MockS3Client:
    """Provide a mock S3 client for unit tests."""
    # Fresh instance per test so object state never leaks between tests.
    return MockS3Client()
|
||||
|
||||
|
||||
@pytest.fixture
def mock_storage(mock_s3_client):
    """
    Provide a mock storage instance for unit tests.

    Uses the MockS3Client to avoid actual S3/MinIO calls.
    """
    from app.storage import S3Storage

    # Bypass __init__ (which would build a real boto3 client) and wire the
    # instance up to the in-memory mock instead.
    instance = S3Storage.__new__(S3Storage)
    instance.client = mock_s3_client
    instance.bucket = "test-bucket"
    instance._active_uploads = {}
    return instance
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Database Fixtures (for integration tests)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
def test_db_url():
    """Get the test database URL."""
    # Assemble the DSN from the env vars pinned at module import time.
    user = os.environ["ORCHARD_DATABASE_USER"]
    password = os.environ["ORCHARD_DATABASE_PASSWORD"]
    host = os.environ["ORCHARD_DATABASE_HOST"]
    port = os.environ["ORCHARD_DATABASE_PORT"]
    name = os.environ["ORCHARD_DATABASE_DBNAME"]
    return f"postgresql://{user}:{password}@{host}:{port}/{name}"
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# HTTP Client Fixtures (for API tests)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
@pytest.fixture
def test_app():
    """
    Create a test FastAPI application.

    Note: This requires the database to be available for integration tests.
    For unit tests, use mock_storage fixture instead.
    """
    # Import lazily so unit-test runs that never use this fixture do not
    # pay for (or fail on) application start-up.
    from fastapi.testclient import TestClient
    from app.main import app

    return TestClient(app)
|
||||
207
backend/tests/test_duplicate_detection.py
Normal file
207
backend/tests/test_duplicate_detection.py
Normal file
@@ -0,0 +1,207 @@
|
||||
"""
|
||||
Unit tests for duplicate detection and deduplication logic.
|
||||
|
||||
Tests cover:
|
||||
- _exists() method correctly identifies existing S3 keys
|
||||
- S3 key generation follows expected pattern
|
||||
- Storage layer skips upload when artifact already exists
|
||||
- Storage layer performs upload when artifact does not exist
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import io
|
||||
from unittest.mock import MagicMock, patch
|
||||
from tests.conftest import (
|
||||
compute_sha256,
|
||||
TEST_CONTENT_HELLO,
|
||||
TEST_HASH_HELLO,
|
||||
)
|
||||
|
||||
|
||||
class TestExistsMethod:
    """Tests for the _exists() method that checks S3 object existence."""

    @pytest.mark.unit
    def test_exists_returns_true_for_existing_key(self, mock_storage, mock_s3_client):
        """_exists() reports True for a key already present in the bucket."""
        key = "fruits/df/fd/test-hash"
        # Seed the in-memory bucket so the lookup can succeed.
        mock_s3_client.objects[key] = b"content"

        assert mock_storage._exists(key) is True

    @pytest.mark.unit
    def test_exists_returns_false_for_nonexistent_key(self, mock_storage):
        """_exists() reports False for a key that was never stored."""
        assert mock_storage._exists("fruits/no/ne/nonexistent-key") is False

    @pytest.mark.unit
    def test_exists_handles_404_error(self, mock_storage):
        """A 404-style ClientError from the mock client maps to False."""
        # The mock client raises ClientError for unknown keys; _exists()
        # must translate that into a clean False rather than propagate.
        assert mock_storage._exists("fruits/xx/yy/does-not-exist") is False
|
||||
|
||||
|
||||
class TestS3KeyGeneration:
    """Tests for S3 key pattern generation."""

    @pytest.mark.unit
    def test_s3_key_pattern(self):
        """Keys follow the pattern fruits/{hash[:2]}/{hash[2:4]}/{hash}."""
        digest = "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"

        built = f"fruits/{digest[:2]}/{digest[2:4]}/{digest}"

        # The two fan-out path segments are the hash's leading byte pairs.
        assert built == f"fruits/ab/cd/{digest}"

    @pytest.mark.unit
    def test_s3_key_generation_in_storage(self, mock_storage):
        """The storage layer derives the S3 key from the content hash."""
        stored = mock_storage._store_simple(io.BytesIO(TEST_CONTENT_HELLO))

        expected = (
            f"fruits/{TEST_HASH_HELLO[:2]}/{TEST_HASH_HELLO[2:4]}/{TEST_HASH_HELLO}"
        )
        assert stored.s3_key == expected

    @pytest.mark.unit
    def test_s3_key_uses_sha256_hash(self, mock_storage):
        """The SHA256 digest of the payload appears inside the key."""
        payload = b"unique test content for key test"
        digest = compute_sha256(payload)

        stored = mock_storage._store_simple(io.BytesIO(payload))

        assert digest in stored.s3_key
|
||||
|
||||
|
||||
class TestDeduplicationBehavior:
    """Tests for deduplication (skip upload when exists)."""

    @pytest.mark.unit
    def test_skips_upload_when_exists(self, mock_storage, mock_s3_client):
        """An already-stored artifact must not trigger a second put_object."""
        payload = TEST_CONTENT_HELLO
        key = (
            f"fruits/{TEST_HASH_HELLO[:2]}/{TEST_HASH_HELLO[2:4]}/{TEST_HASH_HELLO}"
        )

        # Simulate an artifact uploaded in a previous request.
        mock_s3_client.objects[key] = payload

        # Wrap put_object so invocations can be counted.
        real_put = mock_s3_client.put_object
        calls = []

        def counting_put(*args, **kwargs):
            calls.append(True)
            return real_put(*args, **kwargs)

        mock_s3_client.put_object = counting_put

        stored = mock_storage._store_simple(io.BytesIO(payload))

        # Deduplicated: no upload happened, but the hash is still reported.
        assert len(calls) == 0
        assert stored.sha256 == TEST_HASH_HELLO

    @pytest.mark.unit
    def test_uploads_when_not_exists(self, mock_storage, mock_s3_client):
        """New content must actually be written to the backing store."""
        payload = b"brand new unique content"
        digest = compute_sha256(payload)
        key = f"fruits/{digest[:2]}/{digest[2:4]}/{digest}"

        # Precondition: nothing stored under this key yet.
        assert key not in mock_s3_client.objects

        mock_storage._store_simple(io.BytesIO(payload))

        # The object landed in the mock bucket with the exact bytes.
        assert key in mock_s3_client.objects
        assert mock_s3_client.objects[key] == payload

    @pytest.mark.unit
    def test_returns_same_hash_for_duplicate(self, mock_storage, mock_s3_client):
        """Storing identical content twice yields identical hash and key."""
        payload = b"content to be stored twice"

        first = mock_storage._store_simple(io.BytesIO(payload))
        second = mock_storage._store_simple(io.BytesIO(payload))

        assert first.sha256 == second.sha256
        assert first.s3_key == second.s3_key

    @pytest.mark.unit
    def test_different_content_different_keys(self, mock_storage):
        """Distinct payloads map to distinct hashes and S3 keys."""
        first = mock_storage._store_simple(io.BytesIO(b"first content"))
        second = mock_storage._store_simple(io.BytesIO(b"second content"))

        assert first.sha256 != second.sha256
        assert first.s3_key != second.s3_key
|
||||
|
||||
|
||||
class TestDeduplicationEdgeCases:
    """Edge case tests for deduplication."""

    @pytest.mark.unit
    def test_same_content_different_filenames(self, mock_storage):
        """Test same content with different metadata is deduplicated."""
        content = b"identical content"

        # Store with "filename1"
        file1 = io.BytesIO(content)
        result1 = mock_storage._store_simple(file1)

        # Store with "filename2" (same content)
        file2 = io.BytesIO(content)
        result2 = mock_storage._store_simple(file2)

        # Both should have same hash (content-addressable)
        assert result1.sha256 == result2.sha256

    @pytest.mark.unit
    def test_whitespace_only_difference(self, mock_storage):
        """Test content differing only by whitespace produces different hashes."""
        # BUG FIX: content2 was previously byte-identical to content1
        # (the intended extra space was missing), so the set below held
        # only two hashes and the final assertion always failed.
        content1 = b"test content"
        content2 = b"test  content"  # Extra (double) space
        content3 = b"test content "  # Trailing space

        file1 = io.BytesIO(content1)
        file2 = io.BytesIO(content2)
        file3 = io.BytesIO(content3)

        result1 = mock_storage._store_simple(file1)
        result2 = mock_storage._store_simple(file2)
        result3 = mock_storage._store_simple(file3)

        # All should be different (content-addressable)
        assert len({result1.sha256, result2.sha256, result3.sha256}) == 3
|
||||
215
backend/tests/test_hash_calculation.py
Normal file
215
backend/tests/test_hash_calculation.py
Normal file
@@ -0,0 +1,215 @@
|
||||
"""
|
||||
Unit tests for SHA256 hash calculation and deduplication logic.
|
||||
|
||||
Tests cover:
|
||||
- Hash computation produces consistent results
|
||||
- Hash is always 64 character lowercase hexadecimal
|
||||
- Different content produces different hashes
|
||||
- Binary content handling
|
||||
- Large file handling (streaming)
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import hashlib
|
||||
import io
|
||||
from tests.conftest import (
|
||||
create_test_file,
|
||||
compute_sha256,
|
||||
TEST_CONTENT_HELLO,
|
||||
TEST_HASH_HELLO,
|
||||
TEST_CONTENT_BINARY,
|
||||
TEST_HASH_BINARY,
|
||||
)
|
||||
|
||||
|
||||
class TestHashComputation:
    """Unit tests for hash calculation functionality."""

    @pytest.mark.unit
    def test_sha256_consistent_results(self):
        """Hashing the same bytes repeatedly yields exactly one value."""
        payload = b"test content for hashing"

        digests = {compute_sha256(payload) for _ in range(3)}

        assert len(digests) == 1

    @pytest.mark.unit
    def test_sha256_different_content_different_hash(self):
        """Distinct payloads produce distinct digests."""
        assert compute_sha256(b"content version 1") != compute_sha256(
            b"content version 2"
        )

    @pytest.mark.unit
    def test_sha256_format_64_char_hex(self):
        """Digests are always 64 lowercase hexadecimal characters."""
        samples = [
            b"",  # Empty
            b"a",  # Single char
            b"Hello, World!",  # Normal string
            bytes(range(256)),  # All byte values
            b"x" * 10000,  # Larger content
        ]
        hex_digits = set("0123456789abcdef")

        for payload in samples:
            digest = compute_sha256(payload)

            assert len(digest) == 64, (
                f"Hash length should be 64, got {len(digest)}"
            )
            assert digest == digest.lower(), "Hash should be lowercase"
            assert set(digest) <= hex_digits, "Hash should be hex"

    @pytest.mark.unit
    def test_sha256_known_value(self):
        """A known input maps to its precomputed digest."""
        assert compute_sha256(TEST_CONTENT_HELLO) == TEST_HASH_HELLO

    @pytest.mark.unit
    def test_sha256_binary_content(self):
        """Binary payloads, including embedded NULs, hash correctly."""
        assert compute_sha256(TEST_CONTENT_BINARY) == TEST_HASH_BINARY

        nul_payload = b"\x00\x00test\x00\x00"
        assert len(compute_sha256(nul_payload)) == 64

    @pytest.mark.unit
    def test_sha256_streaming_computation(self):
        """Chunked (streaming) hashing matches one-shot hashing."""
        chunk = 8192
        total = chunk * 10  # 80KB
        payload = b"x" * total

        one_shot = compute_sha256(payload)

        hasher = hashlib.sha256()
        for offset in range(0, total, chunk):
            hasher.update(payload[offset : offset + chunk])

        assert one_shot == hasher.hexdigest()

    @pytest.mark.unit
    def test_sha256_order_matters(self):
        """Byte order affects the digest, not just the byte multiset."""
        assert compute_sha256(b"AB") != compute_sha256(b"BA")
|
||||
|
||||
|
||||
class TestStorageHashComputation:
    """Tests for hash computation in the storage layer."""

    @pytest.mark.unit
    def test_storage_computes_sha256(self, mock_storage):
        """The SHA256 digest in the result matches the known value."""
        stored = mock_storage._store_simple(io.BytesIO(TEST_CONTENT_HELLO))

        assert stored.sha256 == TEST_HASH_HELLO

    @pytest.mark.unit
    def test_storage_computes_md5(self, mock_storage):
        """An MD5 digest is computed alongside SHA256."""
        stored = mock_storage._store_simple(io.BytesIO(TEST_CONTENT_HELLO))

        assert stored.md5 == hashlib.md5(TEST_CONTENT_HELLO).hexdigest()

    @pytest.mark.unit
    def test_storage_computes_sha1(self, mock_storage):
        """A SHA1 digest is computed alongside SHA256."""
        stored = mock_storage._store_simple(io.BytesIO(TEST_CONTENT_HELLO))

        assert stored.sha1 == hashlib.sha1(TEST_CONTENT_HELLO).hexdigest()

    @pytest.mark.unit
    def test_storage_returns_correct_size(self, mock_storage):
        """The reported size equals the payload length in bytes."""
        payload = b"test content with known size"

        stored = mock_storage._store_simple(io.BytesIO(payload))

        assert stored.size == len(payload)

    @pytest.mark.unit
    def test_storage_generates_correct_s3_key(self, mock_storage):
        """The key follows fruits/{hash[:2]}/{hash[2:4]}/{hash}."""
        stored = mock_storage._store_simple(io.BytesIO(TEST_CONTENT_HELLO))

        assert stored.s3_key == (
            f"fruits/{TEST_HASH_HELLO[:2]}/{TEST_HASH_HELLO[2:4]}/{TEST_HASH_HELLO}"
        )
|
||||
|
||||
|
||||
class TestHashEdgeCases:
    """Edge case tests for hash computation."""

    @pytest.mark.unit
    def test_hash_empty_content_rejected(self, mock_storage):
        """Zero-byte input must raise HashComputationError."""
        from app.storage import HashComputationError

        with pytest.raises(HashComputationError):
            mock_storage._store_simple(io.BytesIO(b""))

    @pytest.mark.unit
    def test_hash_large_file_streaming(self, mock_storage):
        """A 10MB payload hashes correctly via the streaming path."""
        payload = b"x" * (10 * 1024 * 1024)

        stored = mock_storage._store_simple(io.BytesIO(payload))

        assert stored.sha256 == compute_sha256(payload)

    @pytest.mark.unit
    def test_hash_special_bytes(self):
        """Every possible byte value is handled correctly."""
        digest = compute_sha256(bytes(range(256)))

        assert len(digest) == 64
        assert digest == TEST_HASH_BINARY
|
||||
Reference in New Issue
Block a user