Add ref_count management for deletions with atomic operations and error handling

- Add DELETE endpoints for tags, packages, and projects with proper ref_count
  decrements for all affected artifacts
- Implement atomic ref_count operations using SELECT FOR UPDATE row-level locking
  to prevent race conditions (see the sketch below)
- Add custom storage exceptions (HashComputationError, S3ExistenceCheckError,
  S3UploadError) with retry logic for S3 existence checks
- Handle race conditions in upload by locking artifact row before modification
- Add comprehensive logging for all ref_count changes and deduplication events
- Include ref_count in upload response schema
Author: Mondo Diaz
Date: 2026-01-05 10:04:59 -06:00
parent 66622caf5d
commit 865812af98
3 changed files with 1175 additions and 314 deletions
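
The router diff that adds the DELETE endpoints and the ref_count locking is suppressed below because of its size. As a hedged illustration of the SELECT FOR UPDATE pattern the message describes, here is a minimal sketch assuming SQLAlchemy 2.x and a hypothetical Artifact model; it is not the committed code:

import logging

from sqlalchemy import Integer, String, select
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column

logger = logging.getLogger(__name__)


class Base(DeclarativeBase):
    pass


class Artifact(Base):
    # Hypothetical model: mirrors an artifacts table keyed by sha256 with a ref_count column.
    __tablename__ = "artifacts"

    id: Mapped[str] = mapped_column(String(64), primary_key=True)
    ref_count: Mapped[int] = mapped_column(Integer, nullable=False, default=1)


def decrement_ref_count(db: Session, artifact_id: str) -> int:
    """Decrement an artifact's ref_count atomically by locking its row first."""
    # Row-level lock: concurrent deletes/uploads on the same artifact serialize here.
    artifact = db.execute(
        select(Artifact).where(Artifact.id == artifact_id).with_for_update()
    ).scalar_one()
    artifact.ref_count = max(artifact.ref_count - 1, 0)
    logger.info("artifact %s ref_count is now %d", artifact.id, artifact.ref_count)
    db.flush()  # caller commits; a ref_count of 0 marks the blob as orphaned
    return artifact.ref_count

Under PostgreSQL, with_for_update() issues SELECT ... FOR UPDATE, so a delete racing another delete (or an upload incrementing the same row) blocks until the first transaction commits instead of both reading the same stale count.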

File diff suppressed because it is too large.


@@ -40,8 +40,28 @@ class ProjectResponse(BaseModel):
 # Package format and platform enums
-PACKAGE_FORMATS = ["generic", "npm", "pypi", "docker", "deb", "rpm", "maven", "nuget", "helm"]
-PACKAGE_PLATFORMS = ["any", "linux", "darwin", "windows", "linux-amd64", "linux-arm64", "darwin-amd64", "darwin-arm64", "windows-amd64"]
+PACKAGE_FORMATS = [
+    "generic",
+    "npm",
+    "pypi",
+    "docker",
+    "deb",
+    "rpm",
+    "maven",
+    "nuget",
+    "helm",
+]
+PACKAGE_PLATFORMS = [
+    "any",
+    "linux",
+    "darwin",
+    "windows",
+    "linux-amd64",
+    "linux-arm64",
+    "darwin-amd64",
+    "darwin-arm64",
+    "windows-amd64",
+]

 # Package schemas
@@ -68,6 +88,7 @@ class PackageResponse(BaseModel):
 class TagSummary(BaseModel):
     """Lightweight tag info for embedding in package responses"""
+
     name: str
     artifact_id: str
     created_at: datetime
@@ -75,6 +96,7 @@ class TagSummary(BaseModel):
 class PackageDetailResponse(BaseModel):
     """Package with aggregated metadata"""
+
     id: UUID
     project_id: UUID
     name: str
@@ -135,6 +157,7 @@ class TagResponse(BaseModel):
 class TagDetailResponse(BaseModel):
     """Tag with embedded artifact metadata"""
+
     id: UUID
     package_id: UUID
     name: str
@@ -154,6 +177,7 @@ class TagDetailResponse(BaseModel):
 class TagHistoryResponse(BaseModel):
     """History entry for tag changes"""
+
     id: UUID
     tag_id: UUID
     old_artifact_id: Optional[str]
@@ -167,6 +191,7 @@ class TagHistoryResponse(BaseModel):
 class ArtifactTagInfo(BaseModel):
     """Tag info for embedding in artifact responses"""
+
     id: UUID
     name: str
     package_id: UUID
@@ -176,6 +201,7 @@ class ArtifactTagInfo(BaseModel):
 class ArtifactDetailResponse(BaseModel):
     """Artifact with list of tags/packages referencing it"""
+
     id: str
     sha256: str  # Explicit SHA256 field (same as id)
     size: int
@@ -196,6 +222,7 @@ class ArtifactDetailResponse(BaseModel):
 class PackageArtifactResponse(BaseModel):
     """Artifact with tags for package artifact listing"""
+
     id: str
     sha256: str  # Explicit SHA256 field (same as id)
     size: int
@@ -226,11 +253,13 @@ class UploadResponse(BaseModel):
     s3_etag: Optional[str] = None
     format_metadata: Optional[Dict[str, Any]] = None
     deduplicated: bool = False
+    ref_count: int = 1  # Current reference count after this upload


 # Resumable upload schemas
 class ResumableUploadInitRequest(BaseModel):
     """Request to initiate a resumable upload"""
+
     expected_hash: str  # SHA256 hash of the file (client must compute)
     filename: str
     content_type: Optional[str] = None
@@ -240,6 +269,7 @@ class ResumableUploadInitRequest(BaseModel):
 class ResumableUploadInitResponse(BaseModel):
     """Response from initiating a resumable upload"""
+
     upload_id: Optional[str]  # None if file already exists
     already_exists: bool
     artifact_id: Optional[str] = None  # Set if already_exists is True
@@ -248,17 +278,20 @@ class ResumableUploadInitResponse(BaseModel):
 class ResumableUploadPartResponse(BaseModel):
     """Response from uploading a part"""
+
     part_number: int
     etag: str


 class ResumableUploadCompleteRequest(BaseModel):
     """Request to complete a resumable upload"""
+
     tag: Optional[str] = None


 class ResumableUploadCompleteResponse(BaseModel):
     """Response from completing a resumable upload"""
+
     artifact_id: str
     size: int
     project: str
@@ -268,6 +301,7 @@ class ResumableUploadCompleteResponse(BaseModel):
 class ResumableUploadStatusResponse(BaseModel):
     """Status of a resumable upload"""
+
     upload_id: str
     uploaded_parts: List[int]
     total_uploaded_bytes: int
@@ -288,6 +322,7 @@ class ConsumerResponse(BaseModel):
 # Global search schemas
 class SearchResultProject(BaseModel):
     """Project result for global search"""
+
     id: UUID
     name: str
     description: Optional[str]
@@ -299,6 +334,7 @@ class SearchResultProject(BaseModel):
 class SearchResultPackage(BaseModel):
     """Package result for global search"""
+
     id: UUID
     project_id: UUID
     project_name: str
class SearchResultArtifact(BaseModel): class SearchResultArtifact(BaseModel):
"""Artifact/tag result for global search""" """Artifact/tag result for global search"""
tag_id: UUID tag_id: UUID
tag_name: str tag_name: str
artifact_id: str artifact_id: str
@@ -323,6 +360,7 @@ class SearchResultArtifact(BaseModel):
class GlobalSearchResponse(BaseModel): class GlobalSearchResponse(BaseModel):
"""Combined search results across all entity types""" """Combined search results across all entity types"""
query: str query: str
projects: List[SearchResultProject] projects: List[SearchResultProject]
packages: List[SearchResultPackage] packages: List[SearchResultPackage]
@@ -333,6 +371,7 @@ class GlobalSearchResponse(BaseModel):
# Presigned URL response # Presigned URL response
class PresignedUrlResponse(BaseModel): class PresignedUrlResponse(BaseModel):
"""Response containing a presigned URL for direct S3 download""" """Response containing a presigned URL for direct S3 download"""
url: str url: str
expires_at: datetime expires_at: datetime
method: str = "GET" method: str = "GET"


@@ -16,10 +16,37 @@ MULTIPART_THRESHOLD = 100 * 1024 * 1024
 MULTIPART_CHUNK_SIZE = 10 * 1024 * 1024

 # Chunk size for streaming hash computation
 HASH_CHUNK_SIZE = 8 * 1024 * 1024

+# Maximum retries for S3 existence check
+MAX_EXISTENCE_CHECK_RETRIES = 3
+
+
+class StorageError(Exception):
+    """Base exception for storage operations"""
+
+    pass
+
+
+class HashComputationError(StorageError):
+    """Raised when hash computation fails"""
+
+    pass
+
+
+class S3ExistenceCheckError(StorageError):
+    """Raised when S3 existence check fails after retries"""
+
+    pass
+
+
+class S3UploadError(StorageError):
+    """Raised when S3 upload fails"""
+
+    pass
+
+
 class StorageResult(NamedTuple):
     """Result of storing a file with all computed checksums"""
+
     sha256: str
     size: int
     s3_key: str
@@ -30,7 +57,9 @@ class StorageResult(NamedTuple):
 class S3Storage:
     def __init__(self):
-        config = Config(s3={"addressing_style": "path"} if settings.s3_use_path_style else {})
+        config = Config(
+            s3={"addressing_style": "path"} if settings.s3_use_path_style else {}
+        )

         self.client = boto3.client(
             "s3",
@@ -44,7 +73,9 @@ class S3Storage:
         # Store active multipart uploads for resumable support
         self._active_uploads: Dict[str, Dict[str, Any]] = {}

-    def store(self, file: BinaryIO, content_length: Optional[int] = None) -> StorageResult:
+    def store(
+        self, file: BinaryIO, content_length: Optional[int] = None
+    ) -> StorageResult:
         """
         Store a file and return StorageResult with all checksums.
         Content-addressable: if the file already exists, just return the hash.
@@ -57,25 +88,54 @@ class S3Storage:
             return self._store_multipart(file, content_length)

     def _store_simple(self, file: BinaryIO) -> StorageResult:
-        """Store a small file using simple put_object"""
-        # Read file and compute all hashes
-        content = file.read()
-        sha256_hash = hashlib.sha256(content).hexdigest()
-        md5_hash = hashlib.md5(content).hexdigest()
-        sha1_hash = hashlib.sha1(content).hexdigest()
-        size = len(content)
-
-        # Check if already exists
+        """
+        Store a small file using simple put_object.
+
+        Raises:
+            HashComputationError: If hash computation fails
+            S3ExistenceCheckError: If S3 existence check fails after retries
+            S3UploadError: If S3 upload fails
+        """
+        # Read file and compute all hashes with error handling
+        try:
+            content = file.read()
+            if not content:
+                raise HashComputationError("Empty file content")
+            sha256_hash = hashlib.sha256(content).hexdigest()
+            md5_hash = hashlib.md5(content).hexdigest()
+            sha1_hash = hashlib.sha1(content).hexdigest()
+            size = len(content)
+        except HashComputationError:
+            raise
+        except Exception as e:
+            logger.error(f"Hash computation failed: {e}")
+            raise HashComputationError(f"Failed to compute hash: {e}") from e
+
+        # Check if already exists (with retry logic)
         s3_key = f"fruits/{sha256_hash[:2]}/{sha256_hash[2:4]}/{sha256_hash}"
         s3_etag = None
-        if not self._exists(s3_key):
-            response = self.client.put_object(
-                Bucket=self.bucket,
-                Key=s3_key,
-                Body=content,
-            )
-            s3_etag = response.get("ETag", "").strip('"')
+        try:
+            exists = self._exists(s3_key)
+        except S3ExistenceCheckError:
+            # Re-raise the specific error
+            raise
+        except Exception as e:
+            logger.error(f"Unexpected error during S3 existence check: {e}")
+            raise S3ExistenceCheckError(f"Failed to check S3 existence: {e}") from e
+
+        if not exists:
+            try:
+                response = self.client.put_object(
+                    Bucket=self.bucket,
+                    Key=s3_key,
+                    Body=content,
+                )
+                s3_etag = response.get("ETag", "").strip('"')
+            except ClientError as e:
+                logger.error(f"S3 upload failed: {e}")
+                raise S3UploadError(f"Failed to upload to S3: {e}") from e
         else:
             # Get existing ETag
             obj_info = self.get_object_info(s3_key)
@@ -92,30 +152,55 @@ class S3Storage:
         )

     def _store_multipart(self, file: BinaryIO, content_length: int) -> StorageResult:
-        """Store a large file using S3 multipart upload with streaming hash computation"""
+        """
+        Store a large file using S3 multipart upload with streaming hash computation.
+
+        Raises:
+            HashComputationError: If hash computation fails
+            S3ExistenceCheckError: If S3 existence check fails after retries
+            S3UploadError: If S3 upload fails
+        """
         # First pass: compute all hashes by streaming through file
-        sha256_hasher = hashlib.sha256()
-        md5_hasher = hashlib.md5()
-        sha1_hasher = hashlib.sha1()
-        size = 0
-
-        # Read file in chunks to compute hashes
-        while True:
-            chunk = file.read(HASH_CHUNK_SIZE)
-            if not chunk:
-                break
-            sha256_hasher.update(chunk)
-            md5_hasher.update(chunk)
-            sha1_hasher.update(chunk)
-            size += len(chunk)
-
-        sha256_hash = sha256_hasher.hexdigest()
-        md5_hash = md5_hasher.hexdigest()
-        sha1_hash = sha1_hasher.hexdigest()
+        try:
+            sha256_hasher = hashlib.sha256()
+            md5_hasher = hashlib.md5()
+            sha1_hasher = hashlib.sha1()
+            size = 0
+
+            # Read file in chunks to compute hashes
+            while True:
+                chunk = file.read(HASH_CHUNK_SIZE)
+                if not chunk:
+                    break
+                sha256_hasher.update(chunk)
+                md5_hasher.update(chunk)
+                sha1_hasher.update(chunk)
+                size += len(chunk)
+
+            if size == 0:
+                raise HashComputationError("Empty file content")
+
+            sha256_hash = sha256_hasher.hexdigest()
+            md5_hash = md5_hasher.hexdigest()
+            sha1_hash = sha1_hasher.hexdigest()
+        except HashComputationError:
+            raise
+        except Exception as e:
+            logger.error(f"Hash computation failed for multipart upload: {e}")
+            raise HashComputationError(f"Failed to compute hash: {e}") from e

         s3_key = f"fruits/{sha256_hash[:2]}/{sha256_hash[2:4]}/{sha256_hash}"

-        # Check if already exists (deduplication)
-        if self._exists(s3_key):
+        # Check if already exists (deduplication) with retry logic
+        try:
+            exists = self._exists(s3_key)
+        except S3ExistenceCheckError:
+            raise
+        except Exception as e:
+            logger.error(f"Unexpected error during S3 existence check: {e}")
+            raise S3ExistenceCheckError(f"Failed to check S3 existence: {e}") from e
+
+        if exists:
             obj_info = self.get_object_info(s3_key)
             s3_etag = obj_info.get("etag", "").strip('"') if obj_info else None
             return StorageResult(
@@ -150,10 +235,12 @@ class S3Storage:
                 PartNumber=part_number,
                 Body=chunk,
             )
-            parts.append({
-                "PartNumber": part_number,
-                "ETag": response["ETag"],
-            })
+            parts.append(
+                {
+                    "PartNumber": part_number,
+                    "ETag": response["ETag"],
+                }
+            )
             part_number += 1

         # Complete multipart upload
@@ -226,7 +313,9 @@ class S3Storage:
         # Upload based on size
         if size < MULTIPART_THRESHOLD:
             content = b"".join(all_chunks)
-            response = self.client.put_object(Bucket=self.bucket, Key=s3_key, Body=content)
+            response = self.client.put_object(
+                Bucket=self.bucket, Key=s3_key, Body=content
+            )
             s3_etag = response.get("ETag", "").strip('"')
         else:
             # Use multipart for large files
@@ -251,10 +340,12 @@ class S3Storage:
                 PartNumber=part_number,
                 Body=part_data,
             )
-            parts.append({
-                "PartNumber": part_number,
-                "ETag": response["ETag"],
-            })
+            parts.append(
+                {
+                    "PartNumber": part_number,
+                    "ETag": response["ETag"],
+                }
+            )
             part_number += 1

         # Upload remaining buffer
@@ -266,10 +357,12 @@ class S3Storage:
                 PartNumber=part_number,
                 Body=buffer,
             )
-            parts.append({
-                "PartNumber": part_number,
-                "ETag": response["ETag"],
-            })
+            parts.append(
+                {
+                    "PartNumber": part_number,
+                    "ETag": response["ETag"],
+                }
+            )

         complete_response = self.client.complete_multipart_upload(
             Bucket=self.bucket,
@@ -326,7 +419,9 @@ class S3Storage:
         self._active_uploads[upload_id] = session
         return session

-    def upload_part(self, upload_id: str, part_number: int, data: bytes) -> Dict[str, Any]:
+    def upload_part(
+        self, upload_id: str, part_number: int, data: bytes
+    ) -> Dict[str, Any]:
         """
         Upload a part for a resumable upload.
         Returns part info including ETag.
@@ -434,13 +529,50 @@ class S3Storage:
         except ClientError:
             return None

-    def _exists(self, s3_key: str) -> bool:
-        """Check if an object exists"""
-        try:
-            self.client.head_object(Bucket=self.bucket, Key=s3_key)
-            return True
-        except ClientError:
-            return False
+    def _exists(self, s3_key: str, retry: bool = True) -> bool:
+        """
+        Check if an object exists with optional retry logic.
+
+        Args:
+            s3_key: The S3 key to check
+            retry: Whether to retry on transient failures (default: True)
+
+        Returns:
+            True if object exists, False otherwise
+
+        Raises:
+            S3ExistenceCheckError: If all retries fail due to non-404 errors
+        """
+        import time
+
+        max_retries = MAX_EXISTENCE_CHECK_RETRIES if retry else 1
+        last_error = None
+
+        for attempt in range(max_retries):
+            try:
+                self.client.head_object(Bucket=self.bucket, Key=s3_key)
+                return True
+            except ClientError as e:
+                error_code = e.response.get("Error", {}).get("Code", "")
+                # 404 means object doesn't exist - not an error
+                if error_code in ("404", "NoSuchKey"):
+                    return False
+                # For other errors, retry
+                last_error = e
+                if attempt < max_retries - 1:
+                    logger.warning(
+                        f"S3 existence check failed (attempt {attempt + 1}/{max_retries}): {e}"
+                    )
+                    time.sleep(0.1 * (attempt + 1))  # Exponential backoff
+
+        # All retries failed
+        logger.error(
+            f"S3 existence check failed after {max_retries} attempts: {last_error}"
+        )
+        raise S3ExistenceCheckError(
+            f"Failed to check S3 object existence after {max_retries} attempts: {last_error}"
+        )

     def delete(self, s3_key: str) -> bool:
         """Delete an object"""