Add ref_count management for deletions with atomic operations and error handling
@@ -22,7 +22,9 @@ class Settings(BaseSettings):
    database_pool_size: int = 5  # Number of connections to keep open
    database_max_overflow: int = 10  # Max additional connections beyond pool_size
    database_pool_timeout: int = 30  # Seconds to wait for a connection from pool
    database_pool_recycle: int = 1800  # Recycle connections after this many seconds (30 min)
    database_pool_recycle: int = (
        1800  # Recycle connections after this many seconds (30 min)
    )

    # S3
    s3_endpoint: str = ""
@@ -31,10 +33,20 @@ class Settings(BaseSettings):
    s3_access_key_id: str = ""
    s3_secret_access_key: str = ""
    s3_use_path_style: bool = True
    s3_verify_ssl: bool = True  # Set to False for self-signed certs (dev only)
    s3_connect_timeout: int = 10  # Connection timeout in seconds
    s3_read_timeout: int = 60  # Read timeout in seconds
    s3_max_retries: int = 3  # Max retry attempts for transient failures

    # Upload settings
    max_file_size: int = 10 * 1024 * 1024 * 1024  # 10GB default max file size
    min_file_size: int = 1  # Minimum 1 byte (empty files rejected)

    # Download settings
    download_mode: str = "presigned"  # "presigned", "redirect", or "proxy"
    presigned_url_expiry: int = 3600  # Presigned URL expiry in seconds (default: 1 hour)
    presigned_url_expiry: int = (
        3600  # Presigned URL expiry in seconds (default: 1 hour)
    )

    @property
    def database_url(self) -> str:
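
Reviewer note: for context on how the pool settings above are typically consumed, here is a minimal sketch (not part of this diff) wiring them into a SQLAlchemy engine; `get_settings` and `database_url` are the accessor and property this commit already defines, everything else is standard SQLAlchemy:

    from sqlalchemy import create_engine

    settings = get_settings()

    # SQLAlchemy's QueuePool accepts these knobs directly; the names line up
    # one-to-one with the Settings fields above.
    engine = create_engine(
        settings.database_url,
        pool_size=settings.database_pool_size,        # persistent connections
        max_overflow=settings.database_max_overflow,  # burst connections above pool_size
        pool_timeout=settings.database_pool_timeout,  # seconds to wait for a free connection
        pool_recycle=settings.database_pool_recycle,  # drop connections older than this
    )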
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
from datetime import datetime
from typing import Optional, List, Dict, Any, Generic, TypeVar
from pydantic import BaseModel
from pydantic import BaseModel, field_validator
from uuid import UUID

T = TypeVar("T")
@@ -40,8 +40,28 @@ class ProjectResponse(BaseModel):


# Package format and platform enums
PACKAGE_FORMATS = ["generic", "npm", "pypi", "docker", "deb", "rpm", "maven", "nuget", "helm"]
PACKAGE_PLATFORMS = ["any", "linux", "darwin", "windows", "linux-amd64", "linux-arm64", "darwin-amd64", "darwin-arm64", "windows-amd64"]
PACKAGE_FORMATS = [
    "generic",
    "npm",
    "pypi",
    "docker",
    "deb",
    "rpm",
    "maven",
    "nuget",
    "helm",
]
PACKAGE_PLATFORMS = [
    "any",
    "linux",
    "darwin",
    "windows",
    "linux-amd64",
    "linux-arm64",
    "darwin-amd64",
    "darwin-arm64",
    "windows-amd64",
]


# Package schemas
@@ -68,6 +88,7 @@ class PackageResponse(BaseModel):

class TagSummary(BaseModel):
    """Lightweight tag info for embedding in package responses"""

    name: str
    artifact_id: str
    created_at: datetime
@@ -75,6 +96,7 @@ class TagSummary(BaseModel):

class PackageDetailResponse(BaseModel):
    """Package with aggregated metadata"""

    id: UUID
    project_id: UUID
    name: str
@@ -135,6 +157,7 @@ class TagResponse(BaseModel):

class TagDetailResponse(BaseModel):
    """Tag with embedded artifact metadata"""

    id: UUID
    package_id: UUID
    name: str
@@ -154,6 +177,7 @@ class TagDetailResponse(BaseModel):

class TagHistoryResponse(BaseModel):
    """History entry for tag changes"""

    id: UUID
    tag_id: UUID
    old_artifact_id: Optional[str]
@@ -167,6 +191,7 @@ class TagHistoryResponse(BaseModel):

class ArtifactTagInfo(BaseModel):
    """Tag info for embedding in artifact responses"""

    id: UUID
    name: str
    package_id: UUID
@@ -176,6 +201,7 @@ class ArtifactTagInfo(BaseModel):

class ArtifactDetailResponse(BaseModel):
    """Artifact with list of tags/packages referencing it"""

    id: str
    sha256: str  # Explicit SHA256 field (same as id)
    size: int
@@ -196,6 +222,7 @@ class ArtifactDetailResponse(BaseModel):

class PackageArtifactResponse(BaseModel):
    """Artifact with tags for package artifact listing"""

    id: str
    sha256: str  # Explicit SHA256 field (same as id)
    size: int
@@ -226,20 +253,35 @@ class UploadResponse(BaseModel):
    s3_etag: Optional[str] = None
    format_metadata: Optional[Dict[str, Any]] = None
    deduplicated: bool = False
    ref_count: int = 1  # Current reference count after this upload
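
Reviewer note: the deletion-side ref_count handling named in the commit message lives in the suppressed diff above, so as a rough sketch of the pattern being described (atomic decrement plus error handling), here is one way it could look with SQLAlchemy on PostgreSQL. `Artifact`, the session `db`, and `release_artifact_reference` are illustrative names, not code from this commit:

    from sqlalchemy import update

    def release_artifact_reference(db, artifact_id: str) -> int:
        """Atomically decrement ref_count and return the new value.

        Illustrative sketch only: `Artifact` and the session wiring are
        assumed. The decrement runs as a single UPDATE statement, so two
        concurrent deletions cannot both read the same starting count;
        RETURNING (PostgreSQL) hands back the post-decrement value.
        """
        result = db.execute(
            update(Artifact)
            .where(Artifact.id == artifact_id, Artifact.ref_count > 0)
            .values(ref_count=Artifact.ref_count - 1)
            .returning(Artifact.ref_count)
        )
        row = result.first()
        if row is None:
            raise ValueError(f"Artifact {artifact_id} not found or already at ref_count 0")
        db.commit()
        return row[0]

When the returned count reaches 0, the blob becomes an orphan candidate for the garbage-collection flow whose schemas appear later in this diff.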


# Resumable upload schemas
class ResumableUploadInitRequest(BaseModel):
    """Request to initiate a resumable upload"""

    expected_hash: str  # SHA256 hash of the file (client must compute)
    filename: str
    content_type: Optional[str] = None
    size: int
    tag: Optional[str] = None

    @field_validator("expected_hash")
    @classmethod
    def validate_sha256_hash(cls, v: str) -> str:
        """Validate that expected_hash is a valid 64-character lowercase hex SHA256 hash."""
        import re

        if not re.match(r"^[a-f0-9]{64}$", v.lower()):
            raise ValueError(
                "expected_hash must be a valid 64-character lowercase hexadecimal SHA256 hash"
            )
        return v.lower()  # Normalize to lowercase
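
Reviewer note: a quick check of the validator's behavior, using only the schema defined above (the hash value is made up):

    from pydantic import ValidationError

    req = ResumableUploadInitRequest(
        expected_hash="A3F5" + "0" * 60,  # uppercase hex is accepted...
        filename="app.tar.gz",
        size=1024,
    )
    print(req.expected_hash[:4])  # "a3f5" -- ...but normalized to lowercase

    try:
        ResumableUploadInitRequest(expected_hash="not-a-hash", filename="x", size=1)
    except ValidationError as e:
        print("rejected:", e.errors()[0]["msg"])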


class ResumableUploadInitResponse(BaseModel):
    """Response from initiating a resumable upload"""

    upload_id: Optional[str]  # None if file already exists
    already_exists: bool
    artifact_id: Optional[str] = None  # Set if already_exists is True
@@ -248,17 +290,20 @@ class ResumableUploadInitResponse(BaseModel):

class ResumableUploadPartResponse(BaseModel):
    """Response from uploading a part"""

    part_number: int
    etag: str


class ResumableUploadCompleteRequest(BaseModel):
    """Request to complete a resumable upload"""

    tag: Optional[str] = None


class ResumableUploadCompleteResponse(BaseModel):
    """Response from completing a resumable upload"""

    artifact_id: str
    size: int
    project: str
@@ -268,6 +313,7 @@ class ResumableUploadCompleteResponse(BaseModel):

class ResumableUploadStatusResponse(BaseModel):
    """Status of a resumable upload"""

    upload_id: str
    uploaded_parts: List[int]
    total_uploaded_bytes: int
@@ -288,6 +334,7 @@ class ConsumerResponse(BaseModel):
# Global search schemas
class SearchResultProject(BaseModel):
    """Project result for global search"""

    id: UUID
    name: str
    description: Optional[str]
@@ -299,6 +346,7 @@ class SearchResultProject(BaseModel):

class SearchResultPackage(BaseModel):
    """Package result for global search"""

    id: UUID
    project_id: UUID
    project_name: str
@@ -312,6 +360,7 @@ class SearchResultPackage(BaseModel):

class SearchResultArtifact(BaseModel):
    """Artifact/tag result for global search"""

    tag_id: UUID
    tag_name: str
    artifact_id: str
@@ -323,6 +372,7 @@ class SearchResultArtifact(BaseModel):

class GlobalSearchResponse(BaseModel):
    """Combined search results across all entity types"""

    query: str
    projects: List[SearchResultProject]
    packages: List[SearchResultPackage]
@@ -333,6 +383,7 @@ class GlobalSearchResponse(BaseModel):
# Presigned URL response
class PresignedUrlResponse(BaseModel):
    """Response containing a presigned URL for direct S3 download"""

    url: str
    expires_at: datetime
    method: str = "GET"
@@ -348,3 +399,131 @@ class PresignedUrlResponse(BaseModel):
class HealthResponse(BaseModel):
    status: str
    version: str = "1.0.0"
    storage_healthy: Optional[bool] = None
    database_healthy: Optional[bool] = None


# Garbage collection schemas
class GarbageCollectionResponse(BaseModel):
    """Response from garbage collection operation"""

    artifacts_deleted: int
    bytes_freed: int
    artifact_ids: List[str]
    dry_run: bool


class OrphanedArtifactResponse(BaseModel):
    """Information about an orphaned artifact"""

    id: str
    size: int
    created_at: datetime
    created_by: str
    original_name: Optional[str]


# Storage statistics schemas
class StorageStatsResponse(BaseModel):
    """Global storage statistics"""

    total_artifacts: int
    total_size_bytes: int
    unique_artifacts: int  # Artifacts with ref_count > 0
    orphaned_artifacts: int  # Artifacts with ref_count = 0
    orphaned_size_bytes: int
    total_uploads: int
    deduplicated_uploads: int
    deduplication_ratio: (
        float  # total_uploads / unique_artifacts (if > 1, deduplication is working)
    )
    storage_saved_bytes: int  # Bytes saved through deduplication
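
Reviewer note: to make the ratio concrete, a small worked example using only the field definitions above. All numbers are invented, and the savings line assumes uniform blob size for simplicity:

    total_uploads = 500          # every upload request ever accepted
    unique_artifacts = 200       # distinct SHA256 blobs actually stored
    avg_size = 50 * 1024 * 1024  # pretend every blob is 50 MiB

    deduplication_ratio = total_uploads / unique_artifacts  # 2.5 -> dedup is working
    duplicate_uploads = total_uploads - unique_artifacts    # 300 uploads hit existing blobs
    storage_saved_bytes = duplicate_uploads * avg_size      # ~14.6 GiB never written twice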


class DeduplicationStatsResponse(BaseModel):
    """Deduplication effectiveness statistics"""

    total_logical_bytes: (
        int  # Sum of all upload sizes (what would be stored without dedup)
    )
    total_physical_bytes: int  # Actual storage used
    bytes_saved: int
    savings_percentage: float
    total_uploads: int
    unique_artifacts: int
    duplicate_uploads: int
    average_ref_count: float
    max_ref_count: int
    most_referenced_artifacts: List[Dict[str, Any]]  # Top N most referenced


class ProjectStatsResponse(BaseModel):
    """Per-project statistics"""

    project_id: str
    project_name: str
    package_count: int
    tag_count: int
    artifact_count: int
    total_size_bytes: int
    upload_count: int
    deduplicated_uploads: int
    storage_saved_bytes: int = 0  # Bytes saved through deduplication
    deduplication_ratio: float = 1.0  # upload_count / artifact_count


class PackageStatsResponse(BaseModel):
    """Per-package statistics"""

    package_id: str
    package_name: str
    project_name: str
    tag_count: int
    artifact_count: int
    total_size_bytes: int
    upload_count: int
    deduplicated_uploads: int
    storage_saved_bytes: int = 0
    deduplication_ratio: float = 1.0


class ArtifactStatsResponse(BaseModel):
    """Per-artifact reference statistics"""

    artifact_id: str
    sha256: str
    size: int
    ref_count: int
    storage_savings: int  # (ref_count - 1) * size
    tags: List[Dict[str, Any]]  # Tags referencing this artifact
    projects: List[str]  # Projects using this artifact
    packages: List[str]  # Packages using this artifact
    first_uploaded: Optional[datetime] = None
    last_referenced: Optional[datetime] = None


class CrossProjectDeduplicationResponse(BaseModel):
    """Cross-project deduplication statistics"""

    shared_artifacts_count: int  # Artifacts used in multiple projects
    total_cross_project_savings: int  # Bytes saved by cross-project sharing
    shared_artifacts: List[Dict[str, Any]]  # Details of shared artifacts


class TimeBasedStatsResponse(BaseModel):
    """Time-based deduplication statistics"""

    period: str  # "daily", "weekly", "monthly"
    start_date: datetime
    end_date: datetime
    data_points: List[
        Dict[str, Any]
    ]  # List of {date, uploads, unique, duplicated, bytes_saved}


class StatsReportResponse(BaseModel):
    """Summary report in various formats"""

    format: str  # "json", "csv", "markdown"
    generated_at: datetime
    content: str  # The report content

@@ -1,25 +1,201 @@
import hashlib
import logging
from typing import BinaryIO, Tuple, Optional, Dict, Any, Generator, NamedTuple
from typing import (
    BinaryIO,
    Tuple,
    Optional,
    Dict,
    Any,
    Generator,
    NamedTuple,
    Protocol,
    runtime_checkable,
)
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
from botocore.exceptions import (
    ClientError,
    ConnectionError as BotoConnectionError,
    EndpointConnectionError,
    ReadTimeoutError,
    ConnectTimeoutError,
)

from .config import get_settings

settings = get_settings()
logger = logging.getLogger(__name__)


# =============================================================================
# Storage Backend Protocol/Interface (ISSUE 33)
# =============================================================================


@runtime_checkable
class StorageBackend(Protocol):
    """
    Abstract protocol defining the interface for storage backends.

    All storage implementations (S3, MinIO, future backends) must implement
    this interface to ensure consistent behavior across the application.

    Note on Deduplication:
    - This system uses whole-file deduplication based on SHA256 hash
    - Partial/chunk-level deduplication is NOT supported (out of scope for MVP)
    - Files with identical content but different metadata are deduplicated
    """

    def store(
        self, file: BinaryIO, content_length: Optional[int] = None
    ) -> "StorageResult":
        """
        Store a file and return StorageResult with all checksums.

        Content-addressable: if the file already exists (by hash), just return
        the existing hash without uploading again.

        Args:
            file: File-like object to store
            content_length: Optional hint for file size (enables multipart upload)

        Returns:
            StorageResult with sha256, size, s3_key, and optional checksums

        Raises:
            HashComputationError: If hash computation fails
            S3ExistenceCheckError: If existence check fails after retries
            S3UploadError: If upload fails
        """
        ...

    def get(self, s3_key: str) -> bytes:
        """
        Retrieve a file by its storage key.

        Args:
            s3_key: The storage key (path) of the file

        Returns:
            File content as bytes
        """
        ...

    def get_stream(
        self, s3_key: str, range_header: Optional[str] = None
    ) -> Tuple[Any, int, Optional[str]]:
        """
        Get a streaming response for a file.

        Supports range requests for partial downloads.

        Args:
            s3_key: The storage key of the file
            range_header: Optional HTTP Range header value

        Returns:
            Tuple of (stream, content_length, content_range)
        """
        ...

    def delete(self, s3_key: str) -> bool:
        """
        Delete a file from storage.

        Args:
            s3_key: The storage key of the file to delete

        Returns:
            True if deleted successfully, False otherwise
        """
        ...

    def get_object_info(self, s3_key: str) -> Optional[Dict[str, Any]]:
        """
        Get object metadata without downloading content.

        Args:
            s3_key: The storage key of the file

        Returns:
            Dict with size, content_type, last_modified, etag, or None if not found
        """
        ...

    def generate_presigned_url(
        self,
        s3_key: str,
        expiry: Optional[int] = None,
        response_content_type: Optional[str] = None,
        response_content_disposition: Optional[str] = None,
    ) -> str:
        """
        Generate a presigned URL for downloading an object.

        Args:
            s3_key: The storage key of the file
            expiry: URL expiry in seconds
            response_content_type: Override Content-Type header in response
            response_content_disposition: Override Content-Disposition header

        Returns:
            Presigned URL string
        """
        ...

    def health_check(self) -> bool:
        """
        Check if the storage backend is healthy and accessible.

        Returns:
            True if healthy, False otherwise
        """
        ...
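
Reviewer note: because the protocol is decorated with @runtime_checkable, conformance can be asserted structurally at runtime — isinstance() checks only that the required method names exist, not their signatures. A minimal sketch; `InMemoryStorage` is a hypothetical test double, not part of this commit:

    storage = S3Storage()  # concrete implementation defined later in this module
    assert isinstance(storage, StorageBackend)

    class InMemoryStorage:  # hypothetical test double
        def store(self, file, content_length=None): ...
        def get(self, s3_key): ...
        def get_stream(self, s3_key, range_header=None): ...
        def delete(self, s3_key): ...
        def get_object_info(self, s3_key): ...
        def generate_presigned_url(self, s3_key, expiry=None,
                                   response_content_type=None,
                                   response_content_disposition=None): ...
        def health_check(self): ...

    assert isinstance(InMemoryStorage(), StorageBackend)  # passes: same shape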


# Threshold for multipart upload (100MB)
MULTIPART_THRESHOLD = 100 * 1024 * 1024
# Chunk size for multipart upload (10MB)
MULTIPART_CHUNK_SIZE = 10 * 1024 * 1024
# Chunk size for streaming hash computation
HASH_CHUNK_SIZE = 8 * 1024 * 1024
# Maximum retries for S3 existence check
MAX_EXISTENCE_CHECK_RETRIES = 3
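
Reviewer note: what these constants mean in practice, as illustrative arithmetic only:

    size = 250 * 1024 * 1024                  # a 250 MB upload
    assert size >= MULTIPART_THRESHOLD        # so it takes the multipart path
    parts = -(-size // MULTIPART_CHUNK_SIZE)  # ceil division: 25 parts of <=10 MB
    hash_reads = -(-size // HASH_CHUNK_SIZE)  # 32 reads for the streaming hash pass
    print(parts, hash_reads)                  # 25 32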


class StorageError(Exception):
    """Base exception for storage operations"""

    pass


class HashComputationError(StorageError):
    """Raised when hash computation fails"""

    pass


class FileSizeExceededError(StorageError):
    """Raised when file exceeds maximum size during upload"""

    pass


class S3ExistenceCheckError(StorageError):
    """Raised when S3 existence check fails after retries"""

    pass


class S3UploadError(StorageError):
    """Raised when S3 upload fails"""

    pass


class StorageResult(NamedTuple):
    """Result of storing a file with all computed checksums"""

    sha256: str
    size: int
    s3_key: str
@@ -28,9 +204,34 @@ class StorageResult(NamedTuple):
    s3_etag: Optional[str] = None


class S3StorageUnavailableError(StorageError):
    """Raised when S3 storage backend is unavailable"""

    pass


class HashCollisionError(StorageError):
    """Raised when a hash collision is detected (extremely rare)"""

    pass


class S3Storage:
    def __init__(self):
        config = Config(s3={"addressing_style": "path"} if settings.s3_use_path_style else {})
        # Build config with retry and timeout settings
        s3_config = {}
        if settings.s3_use_path_style:
            s3_config["addressing_style"] = "path"

        config = Config(
            s3=s3_config if s3_config else None,
            connect_timeout=settings.s3_connect_timeout,
            read_timeout=settings.s3_read_timeout,
            retries={
                "max_attempts": settings.s3_max_retries,
                "mode": "adaptive",  # Adaptive retry mode for better handling
            },
        )

        self.client = boto3.client(
            "s3",
@@ -39,12 +240,15 @@ class S3Storage:
            aws_access_key_id=settings.s3_access_key_id,
            aws_secret_access_key=settings.s3_secret_access_key,
            config=config,
            verify=settings.s3_verify_ssl,  # SSL/TLS verification
        )
        self.bucket = settings.s3_bucket
        # Store active multipart uploads for resumable support
        self._active_uploads: Dict[str, Dict[str, Any]] = {}

    def store(self, file: BinaryIO, content_length: Optional[int] = None) -> StorageResult:
    def store(
        self, file: BinaryIO, content_length: Optional[int] = None
    ) -> StorageResult:
        """
        Store a file and return StorageResult with all checksums.
        Content-addressable: if the file already exists, just return the hash.
@@ -57,30 +261,91 @@ class S3Storage:
            return self._store_multipart(file, content_length)

    def _store_simple(self, file: BinaryIO) -> StorageResult:
        """Store a small file using simple put_object"""
        # Read file and compute all hashes
        content = file.read()
        sha256_hash = hashlib.sha256(content).hexdigest()
        md5_hash = hashlib.md5(content).hexdigest()
        sha1_hash = hashlib.sha1(content).hexdigest()
        size = len(content)
        """
        Store a small file using simple put_object.

        # Check if already exists
        Raises:
            HashComputationError: If hash computation fails
            FileSizeExceededError: If file exceeds maximum size
            S3ExistenceCheckError: If S3 existence check fails after retries
            S3UploadError: If S3 upload fails
        """
        # Read file and compute all hashes with error handling
        try:
            content = file.read()
            if not content:
                raise HashComputationError("Empty file content")

            size = len(content)

            # Enforce file size limit (protection against Content-Length spoofing)
            if size > settings.max_file_size:
                raise FileSizeExceededError(
                    f"File size {size} exceeds maximum {settings.max_file_size}"
                )

            sha256_hash = hashlib.sha256(content).hexdigest()
            md5_hash = hashlib.md5(content).hexdigest()
            sha1_hash = hashlib.sha1(content).hexdigest()
        except (HashComputationError, FileSizeExceededError):
            raise
        except Exception as e:
            logger.error(f"Hash computation failed: {e}")
            raise HashComputationError(f"Failed to compute hash: {e}") from e

        # Check if already exists (with retry logic)
        s3_key = f"fruits/{sha256_hash[:2]}/{sha256_hash[2:4]}/{sha256_hash}"
        s3_etag = None

        if not self._exists(s3_key):
            response = self.client.put_object(
                Bucket=self.bucket,
                Key=s3_key,
                Body=content,
            )
            s3_etag = response.get("ETag", "").strip('"')
        try:
            exists = self._exists(s3_key)
        except S3ExistenceCheckError:
            # Re-raise the specific error
            raise
        except Exception as e:
            logger.error(f"Unexpected error during S3 existence check: {e}")
            raise S3ExistenceCheckError(f"Failed to check S3 existence: {e}") from e

        if not exists:
            try:
                response = self.client.put_object(
                    Bucket=self.bucket,
                    Key=s3_key,
                    Body=content,
                )
                s3_etag = response.get("ETag", "").strip('"')
            except (EndpointConnectionError, BotoConnectionError) as e:
                logger.error(f"S3 storage unavailable: {e}")
                raise S3StorageUnavailableError(
                    f"Storage backend unavailable: {e}"
                ) from e
            except (ReadTimeoutError, ConnectTimeoutError) as e:
                logger.error(f"S3 operation timed out: {e}")
                raise S3UploadError(f"Upload timed out: {e}") from e
            except ClientError as e:
                error_code = e.response.get("Error", {}).get("Code", "")
                if error_code == "ServiceUnavailable":
                    logger.error(f"S3 service unavailable: {e}")
                    raise S3StorageUnavailableError(
                        f"Storage service unavailable: {e}"
                    ) from e
                logger.error(f"S3 upload failed: {e}")
                raise S3UploadError(f"Failed to upload to S3: {e}") from e
        else:
            # Get existing ETag
            # Get existing ETag and verify integrity (detect potential hash collision)
            obj_info = self.get_object_info(s3_key)
            if obj_info:
                s3_etag = obj_info.get("etag", "").strip('"')
                # Check for hash collision by comparing size
                existing_size = obj_info.get("size", 0)
                if existing_size != size:
                    logger.critical(
                        f"HASH COLLISION DETECTED! Hash {sha256_hash} has size mismatch: "
                        f"existing={existing_size}, new={size}. This is extremely rare."
                    )
                    raise HashCollisionError(
                        f"Hash collision detected for {sha256_hash}: size mismatch"
                    )

        return StorageResult(
            sha256=sha256_hash,
@@ -92,32 +357,75 @@ class S3Storage:
        )

    def _store_multipart(self, file: BinaryIO, content_length: int) -> StorageResult:
        """Store a large file using S3 multipart upload with streaming hash computation"""
        """
        Store a large file using S3 multipart upload with streaming hash computation.

        Raises:
            HashComputationError: If hash computation fails
            FileSizeExceededError: If file exceeds maximum size
            S3ExistenceCheckError: If S3 existence check fails after retries
            S3UploadError: If S3 upload fails
        """
        # First pass: compute all hashes by streaming through file
        sha256_hasher = hashlib.sha256()
        md5_hasher = hashlib.md5()
        sha1_hasher = hashlib.sha1()
        size = 0
        try:
            sha256_hasher = hashlib.sha256()
            md5_hasher = hashlib.md5()
            sha1_hasher = hashlib.sha1()
            size = 0

        # Read file in chunks to compute hashes
        while True:
            chunk = file.read(HASH_CHUNK_SIZE)
            if not chunk:
                break
            sha256_hasher.update(chunk)
            md5_hasher.update(chunk)
            sha1_hasher.update(chunk)
            size += len(chunk)
            # Read file in chunks to compute hashes
            while True:
                chunk = file.read(HASH_CHUNK_SIZE)
                if not chunk:
                    break
                sha256_hasher.update(chunk)
                md5_hasher.update(chunk)
                sha1_hasher.update(chunk)
                size += len(chunk)

                # Enforce file size limit during streaming (protection against spoofing)
                if size > settings.max_file_size:
                    raise FileSizeExceededError(
                        f"File size exceeds maximum {settings.max_file_size}"
                    )

            if size == 0:
                raise HashComputationError("Empty file content")

            sha256_hash = sha256_hasher.hexdigest()
            md5_hash = md5_hasher.hexdigest()
            sha1_hash = sha1_hasher.hexdigest()
        except (HashComputationError, FileSizeExceededError):
            raise
        except Exception as e:
            logger.error(f"Hash computation failed for multipart upload: {e}")
            raise HashComputationError(f"Failed to compute hash: {e}") from e

        sha256_hash = sha256_hasher.hexdigest()
        md5_hash = md5_hasher.hexdigest()
        sha1_hash = sha1_hasher.hexdigest()
        s3_key = f"fruits/{sha256_hash[:2]}/{sha256_hash[2:4]}/{sha256_hash}"

        # Check if already exists (deduplication)
        if self._exists(s3_key):
        # Check if already exists (deduplication) with retry logic
        try:
            exists = self._exists(s3_key)
        except S3ExistenceCheckError:
            raise
        except Exception as e:
            logger.error(f"Unexpected error during S3 existence check: {e}")
            raise S3ExistenceCheckError(f"Failed to check S3 existence: {e}") from e

        if exists:
            obj_info = self.get_object_info(s3_key)
            s3_etag = obj_info.get("etag", "").strip('"') if obj_info else None
            # Check for hash collision by comparing size
            if obj_info:
                existing_size = obj_info.get("size", 0)
                if existing_size != size:
                    logger.critical(
                        f"HASH COLLISION DETECTED! Hash {sha256_hash} has size mismatch: "
                        f"existing={existing_size}, new={size}. This is extremely rare."
                    )
                    raise HashCollisionError(
                        f"Hash collision detected for {sha256_hash}: size mismatch"
                    )
            return StorageResult(
                sha256=sha256_hash,
                size=size,
@@ -131,7 +439,11 @@ class S3Storage:
        file.seek(0)

        # Start multipart upload
        mpu = self.client.create_multipart_upload(Bucket=self.bucket, Key=s3_key)
        try:
            mpu = self.client.create_multipart_upload(Bucket=self.bucket, Key=s3_key)
        except (EndpointConnectionError, BotoConnectionError) as e:
            logger.error(f"S3 storage unavailable for multipart upload: {e}")
            raise S3StorageUnavailableError(f"Storage backend unavailable: {e}") from e
        upload_id = mpu["UploadId"]

        try:
@@ -150,10 +462,12 @@ class S3Storage:
                        PartNumber=part_number,
                        Body=chunk,
                    )
                    parts.append({
                        "PartNumber": part_number,
                        "ETag": response["ETag"],
                    })
                    parts.append(
                        {
                            "PartNumber": part_number,
                            "ETag": response["ETag"],
                        }
                    )
                    part_number += 1

            # Complete multipart upload
@@ -226,7 +540,9 @@ class S3Storage:
        # Upload based on size
        if size < MULTIPART_THRESHOLD:
            content = b"".join(all_chunks)
            response = self.client.put_object(Bucket=self.bucket, Key=s3_key, Body=content)
            response = self.client.put_object(
                Bucket=self.bucket, Key=s3_key, Body=content
            )
            s3_etag = response.get("ETag", "").strip('"')
        else:
            # Use multipart for large files
@@ -251,10 +567,12 @@ class S3Storage:
                    PartNumber=part_number,
                    Body=part_data,
                )
                parts.append({
                    "PartNumber": part_number,
                    "ETag": response["ETag"],
                })
                parts.append(
                    {
                        "PartNumber": part_number,
                        "ETag": response["ETag"],
                    }
                )
                part_number += 1

        # Upload remaining buffer
@@ -266,10 +584,12 @@ class S3Storage:
                    PartNumber=part_number,
                    Body=buffer,
                )
                parts.append({
                    "PartNumber": part_number,
                    "ETag": response["ETag"],
                })
                parts.append(
                    {
                        "PartNumber": part_number,
                        "ETag": response["ETag"],
                    }
                )

        complete_response = self.client.complete_multipart_upload(
            Bucket=self.bucket,
@@ -326,7 +646,9 @@ class S3Storage:
        self._active_uploads[upload_id] = session
        return session

    def upload_part(self, upload_id: str, part_number: int, data: bytes) -> Dict[str, Any]:
    def upload_part(
        self, upload_id: str, part_number: int, data: bytes
    ) -> Dict[str, Any]:
        """
        Upload a part for a resumable upload.
        Returns part info including ETag.
@@ -434,13 +756,50 @@ class S3Storage:
        except ClientError:
            return None

    def _exists(self, s3_key: str) -> bool:
        """Check if an object exists"""
        try:
            self.client.head_object(Bucket=self.bucket, Key=s3_key)
            return True
        except ClientError:
            return False
    def _exists(self, s3_key: str, retry: bool = True) -> bool:
        """
        Check if an object exists with optional retry logic.

        Args:
            s3_key: The S3 key to check
            retry: Whether to retry on transient failures (default: True)

        Returns:
            True if object exists, False otherwise

        Raises:
            S3ExistenceCheckError: If all retries fail due to non-404 errors
        """
        import time

        max_retries = MAX_EXISTENCE_CHECK_RETRIES if retry else 1
        last_error = None

        for attempt in range(max_retries):
            try:
                self.client.head_object(Bucket=self.bucket, Key=s3_key)
                return True
            except ClientError as e:
                error_code = e.response.get("Error", {}).get("Code", "")
                # 404 means object doesn't exist - not an error
                if error_code in ("404", "NoSuchKey"):
                    return False

                # For other errors, retry
                last_error = e
                if attempt < max_retries - 1:
                    logger.warning(
                        f"S3 existence check failed (attempt {attempt + 1}/{max_retries}): {e}"
                    )
                    time.sleep(0.1 * (attempt + 1))  # Linear backoff (0.1s, 0.2s, ...)

        # All retries failed
        logger.error(
            f"S3 existence check failed after {max_retries} attempts: {last_error}"
        )
        raise S3ExistenceCheckError(
            f"Failed to check S3 object existence after {max_retries} attempts: {last_error}"
        )

    def delete(self, s3_key: str) -> bool:
        """Delete an object"""
@@ -490,12 +849,68 @@ class S3Storage:
        )
        return url

    def health_check(self) -> bool:
        """
        Check if the storage backend is healthy and accessible.

        Performs a lightweight HEAD request on the bucket to verify connectivity.

        Returns:
            True if healthy, False otherwise
        """
        try:
            self.client.head_bucket(Bucket=self.bucket)
            return True
        except ClientError as e:
            logger.warning(f"Storage health check failed: {e}")
            return False
        except Exception as e:
            logger.error(f"Unexpected error during storage health check: {e}")
            return False

    def verify_integrity(self, s3_key: str, expected_sha256: str) -> bool:
        """
        Verify the integrity of a stored object by downloading and re-hashing.

        This is an expensive operation and should only be used for critical
        verification scenarios.

        Args:
            s3_key: The storage key of the file
            expected_sha256: The expected SHA256 hash

        Returns:
            True if hash matches, False otherwise
        """
        try:
            content = self.get(s3_key)
            actual_hash = hashlib.sha256(content).hexdigest()
            if actual_hash != expected_sha256:
                logger.error(
                    f"Integrity verification failed for {s3_key}: "
                    f"expected {expected_sha256[:12]}..., got {actual_hash[:12]}..."
                )
                return False
            return True
        except Exception as e:
            logger.error(f"Error during integrity verification for {s3_key}: {e}")
            return False
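
Reviewer note: one plausible place this hook earns its cost — a hedged sketch, not code from this commit — is double-checking a blob before a garbage-collection delete once its ref_count has reached zero. `artifact` here is a hypothetical ORM row with `s3_key`, `sha256`, and `ref_count` attributes:

    # Hypothetical GC step, illustrative only.
    storage = get_storage()
    if artifact.ref_count == 0:
        # Re-hash before destroying the only copy; skip deletion on mismatch
        # so a metadata bug cannot cascade into data loss.
        if storage.verify_integrity(artifact.s3_key, artifact.sha256):
            storage.delete(artifact.s3_key)
        else:
            logger.error(f"Refusing to GC {artifact.s3_key}: integrity check failed")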


# Singleton instance
_storage = None
_storage: Optional[S3Storage] = None


def get_storage() -> S3Storage:
def get_storage() -> StorageBackend:
    """
    Get the configured storage backend instance.

    Currently returns S3Storage (works with S3-compatible backends like MinIO).
    Future implementations may support backend selection via configuration.

    Returns:
        StorageBackend instance
    """
    global _storage
    if _storage is None:
        _storage = S3Storage()
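
Reviewer note: a minimal caller's-eye sketch of the storage API as this module now exposes it; the file path is invented:

    # Illustrative caller, assuming the functions defined in this module.
    storage = get_storage()

    if storage.health_check():
        with open("dist/app-1.0.0.tar.gz", "rb") as f:  # invented path
            result = storage.store(f)
        print(result.sha256, result.size, result.s3_key)

        # Uploading the same bytes again would short-circuit on the existing
        # key (content-addressable dedup); downloads can bypass the app:
        url = storage.generate_presigned_url(result.s3_key, expiry=3600)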