Add storage abstraction, stats endpoints, garbage collection, and test infrastructure
- Add StorageBackend protocol for backend-agnostic storage interface
- Add health check with storage and database connectivity verification
- Add garbage collection endpoints for orphaned artifacts (ref_count=0)
- Add deduplication statistics endpoints (/api/v1/stats, /stats/storage, /stats/deduplication)
- Add per-project statistics endpoint
- Add verify_integrity method for post-upload hash validation
- Set up pytest infrastructure with mock S3 client
- Add unit tests for hash calculation and duplicate detection (sketched below)
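As a sketch of the test coverage described in the last two bullets, hash calculation and duplicate detection can be exercised without touching S3 at all. The file, helper, and test names below are illustrative assumptions, not the actual tests shipped in this commit:

    # tests/test_dedup_sketch.py -- illustrative only; names are assumptions,
    # not the test module added by this commit.
    import hashlib
    import io


    def sha256_of(stream) -> str:
        """Hash a file-like object in chunks, mirroring whole-file dedup keying."""
        digest = hashlib.sha256()
        for chunk in iter(lambda: stream.read(8192), b""):
            digest.update(chunk)
        return digest.hexdigest()


    def test_identical_content_hashes_identically():
        a = io.BytesIO(b"same bytes")
        b = io.BytesIO(b"same bytes")
        assert sha256_of(a) == sha256_of(b)  # duplicate content -> same storage key


    def test_different_content_hashes_differently():
        a = io.BytesIO(b"payload one")
        b = io.BytesIO(b"payload two")
        assert sha256_of(a) != sha256_of(b)

The real tests presumably drive S3Storage through a mocked boto3 client, but the hashing invariant they check is the same.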
@@ -1,6 +1,17 @@
 import hashlib
 import logging
-from typing import BinaryIO, Tuple, Optional, Dict, Any, Generator, NamedTuple
+from abc import ABC, abstractmethod
+from typing import (
+    BinaryIO,
+    Tuple,
+    Optional,
+    Dict,
+    Any,
+    Generator,
+    NamedTuple,
+    Protocol,
+    runtime_checkable,
+)
 import boto3
 from botocore.config import Config
 from botocore.exceptions import ClientError
@@ -10,6 +21,133 @@ from .config import get_settings
 settings = get_settings()
 logger = logging.getLogger(__name__)

+
+# =============================================================================
+# Storage Backend Protocol/Interface (ISSUE 33)
+# =============================================================================
+
+
+@runtime_checkable
+class StorageBackend(Protocol):
+    """
+    Abstract protocol defining the interface for storage backends.
+
+    All storage implementations (S3, MinIO, future backends) must implement
+    this interface to ensure consistent behavior across the application.
+
+    Note on Deduplication:
+    - This system uses whole-file deduplication based on SHA256 hash
+    - Partial/chunk-level deduplication is NOT supported (out of scope for MVP)
+    - Files with identical content but different metadata are deduplicated
+    """
+
+    def store(
+        self, file: BinaryIO, content_length: Optional[int] = None
+    ) -> "StorageResult":
+        """
+        Store a file and return StorageResult with all checksums.
+
+        Content-addressable: if the file already exists (by hash), just return
+        the existing hash without uploading again.
+
+        Args:
+            file: File-like object to store
+            content_length: Optional hint for file size (enables multipart upload)
+
+        Returns:
+            StorageResult with sha256, size, s3_key, and optional checksums
+
+        Raises:
+            HashComputationError: If hash computation fails
+            S3ExistenceCheckError: If existence check fails after retries
+            S3UploadError: If upload fails
+        """
+        ...
+
+    def get(self, s3_key: str) -> bytes:
+        """
+        Retrieve a file by its storage key.
+
+        Args:
+            s3_key: The storage key (path) of the file
+
+        Returns:
+            File content as bytes
+        """
+        ...
+
+    def get_stream(
+        self, s3_key: str, range_header: Optional[str] = None
+    ) -> Tuple[Any, int, Optional[str]]:
+        """
+        Get a streaming response for a file.
+
+        Supports range requests for partial downloads.
+
+        Args:
+            s3_key: The storage key of the file
+            range_header: Optional HTTP Range header value
+
+        Returns:
+            Tuple of (stream, content_length, content_range)
+        """
+        ...
+
+    def delete(self, s3_key: str) -> bool:
+        """
+        Delete a file from storage.
+
+        Args:
+            s3_key: The storage key of the file to delete
+
+        Returns:
+            True if deleted successfully, False otherwise
+        """
+        ...
+
+    def get_object_info(self, s3_key: str) -> Optional[Dict[str, Any]]:
+        """
+        Get object metadata without downloading content.
+
+        Args:
+            s3_key: The storage key of the file
+
+        Returns:
+            Dict with size, content_type, last_modified, etag, or None if not found
+        """
+        ...
+
+    def generate_presigned_url(
+        self,
+        s3_key: str,
+        expiry: Optional[int] = None,
+        response_content_type: Optional[str] = None,
+        response_content_disposition: Optional[str] = None,
+    ) -> str:
+        """
+        Generate a presigned URL for downloading an object.
+
+        Args:
+            s3_key: The storage key of the file
+            expiry: URL expiry in seconds
+            response_content_type: Override Content-Type header in response
+            response_content_disposition: Override Content-Disposition header
+
+        Returns:
+            Presigned URL string
+        """
+        ...
+
+    def health_check(self) -> bool:
+        """
+        Check if the storage backend is healthy and accessible.
+
+        Returns:
+            True if healthy, False otherwise
+        """
+        ...
+
+
 # Threshold for multipart upload (100MB)
 MULTIPART_THRESHOLD = 100 * 1024 * 1024
 # Chunk size for multipart upload (10MB)
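Because StorageBackend is declared with @runtime_checkable, isinstance checks only verify that methods with the expected names exist; no subclassing is required. A minimal in-memory stand-in along the lines below (a sketch, not part of this commit; FakeStorageResult and the key prefix are assumptions standing in for the module's real StorageResult and key scheme) shows how the content-addressable store() contract can be satisfied for tests:

    # Illustrative sketch only -- an in-memory backend for tests.
    import hashlib
    from typing import BinaryIO, Dict, NamedTuple, Optional


    class FakeStorageResult(NamedTuple):
        # Stand-in for this module's StorageResult; field names are assumptions.
        sha256: str
        size: int
        s3_key: str


    class InMemoryStorage:
        def __init__(self) -> None:
            self._objects: Dict[str, bytes] = {}

        def store(
            self, file: BinaryIO, content_length: Optional[int] = None
        ) -> FakeStorageResult:
            data = file.read()
            digest = hashlib.sha256(data).hexdigest()
            key = f"artifacts/{digest}"          # content-addressable key (prefix assumed)
            self._objects.setdefault(key, data)  # duplicate content is stored only once
            return FakeStorageResult(sha256=digest, size=len(data), s3_key=key)

        def get(self, s3_key: str) -> bytes:
            return self._objects[s3_key]

        def delete(self, s3_key: str) -> bool:
            return self._objects.pop(s3_key, None) is not None

        def health_check(self) -> bool:
            return True

An isinstance(InMemoryStorage(), StorageBackend) check would pass only once the remaining protocol methods (get_stream, get_object_info, generate_presigned_url) are also provided, since runtime-checkable protocols verify method presence, not signatures.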
@@ -622,12 +760,68 @@ class S3Storage:
         )
         return url

+    def health_check(self) -> bool:
+        """
+        Check if the storage backend is healthy and accessible.
+
+        Performs a lightweight HEAD request on the bucket to verify connectivity.
+
+        Returns:
+            True if healthy, False otherwise
+        """
+        try:
+            self.client.head_bucket(Bucket=self.bucket)
+            return True
+        except ClientError as e:
+            logger.warning(f"Storage health check failed: {e}")
+            return False
+        except Exception as e:
+            logger.error(f"Unexpected error during storage health check: {e}")
+            return False
+
+    def verify_integrity(self, s3_key: str, expected_sha256: str) -> bool:
+        """
+        Verify the integrity of a stored object by downloading and re-hashing.
+
+        This is an expensive operation and should only be used for critical
+        verification scenarios.
+
+        Args:
+            s3_key: The storage key of the file
+            expected_sha256: The expected SHA256 hash
+
+        Returns:
+            True if hash matches, False otherwise
+        """
+        try:
+            content = self.get(s3_key)
+            actual_hash = hashlib.sha256(content).hexdigest()
+            if actual_hash != expected_sha256:
+                logger.error(
+                    f"Integrity verification failed for {s3_key}: "
+                    f"expected {expected_sha256[:12]}..., got {actual_hash[:12]}..."
+                )
+                return False
+            return True
+        except Exception as e:
+            logger.error(f"Error during integrity verification for {s3_key}: {e}")
+            return False
+

 # Singleton instance
-_storage = None
+_storage: Optional[S3Storage] = None


-def get_storage() -> S3Storage:
+def get_storage() -> StorageBackend:
     """
     Get the configured storage backend instance.

     Currently returns S3Storage (works with S3-compatible backends like MinIO).
     Future implementations may support backend selection via configuration.

     Returns:
         StorageBackend instance
     """
     global _storage
     if _storage is None:
         _storage = S3Storage()
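A usage sketch of the new pieces (the import path and artifact file name are assumptions): health_check is the cheap connectivity probe, while verify_integrity, defined on S3Storage itself rather than on the protocol, re-downloads and re-hashes the object and is meant for occasional post-upload spot checks:

    # Illustrative usage only; "app.storage" and "artifact.bin" are assumptions.
    from app.storage import get_storage

    storage = get_storage()

    # Cheap connectivity probe (HEAD on the bucket); never raises, returns a bool.
    if not storage.health_check():
        raise RuntimeError("storage backend unreachable")

    # Store a file, then spot-check it. verify_integrity downloads the whole
    # object and re-hashes it, so it is too expensive to run on every upload.
    with open("artifact.bin", "rb") as fh:
        result = storage.store(fh)

    if not storage.verify_integrity(result.s3_key, result.sha256):
        raise RuntimeError(f"hash mismatch for {result.s3_key}")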