Add ref_count management for deletions with atomic operations and error handling
- Add DELETE endpoints for tags, packages, and projects with proper ref_count decrements for all affected artifacts
- Implement atomic ref_count operations using SELECT FOR UPDATE row-level locking to prevent race conditions (see the sketch after this list)
- Add custom storage exceptions (HashComputationError, S3ExistenceCheckError, S3UploadError) with retry logic for S3 existence checks
- Handle race conditions in upload by locking the artifact row before modification
- Add comprehensive logging for all ref_count changes and deduplication events
- Include ref_count in upload response schema
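The atomic ref_count handling lives in the API/database layer and is not part of the storage diff below. As a minimal sketch of the decrement path described above, assuming SQLAlchemy, a session named db, and a hypothetical Artifact model with id, ref_count, and s3_key columns (none of these names appear in this diff):

import logging

from sqlalchemy.orm import Session

logger = logging.getLogger(__name__)


def decrement_ref_count(db: Session, artifact_id: int, storage: "S3Storage") -> None:
    """Atomically decrement an artifact's ref_count; delete the blob at zero (sketch)."""
    # SELECT ... FOR UPDATE locks the row so concurrent uploads and deletes
    # cannot race on ref_count.
    artifact = (
        db.query(Artifact)  # Artifact is an assumed ORM model, not shown in this commit
        .filter(Artifact.id == artifact_id)
        .with_for_update()
        .one()
    )
    artifact.ref_count -= 1
    logger.info("artifact %s ref_count decremented to %s", artifact_id, artifact.ref_count)
    if artifact.ref_count <= 0:
        storage.delete(artifact.s3_key)
        db.delete(artifact)
    db.commit()

Each DELETE endpoint (tag, package, project) would call something like this once per affected artifact, with the row lock held for the duration of the update so the decrement and the zero check stay atomic.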
@@ -16,10 +16,37 @@ MULTIPART_THRESHOLD = 100 * 1024 * 1024
 MULTIPART_CHUNK_SIZE = 10 * 1024 * 1024
+# Chunk size for streaming hash computation
+HASH_CHUNK_SIZE = 8 * 1024 * 1024
+# Maximum retries for S3 existence check
+MAX_EXISTENCE_CHECK_RETRIES = 3
+
+
+class StorageError(Exception):
+    """Base exception for storage operations"""
+
+    pass
+
+
+class HashComputationError(StorageError):
+    """Raised when hash computation fails"""
+
+    pass
+
+
+class S3ExistenceCheckError(StorageError):
+    """Raised when S3 existence check fails after retries"""
+
+    pass
+
+
+class S3UploadError(StorageError):
+    """Raised when S3 upload fails"""
+
+    pass


 class StorageResult(NamedTuple):
     """Result of storing a file with all computed checksums"""

     sha256: str
     size: int
     s3_key: str
@@ -30,7 +57,9 @@ class StorageResult(NamedTuple):

 class S3Storage:
     def __init__(self):
-        config = Config(s3={"addressing_style": "path"} if settings.s3_use_path_style else {})
+        config = Config(
+            s3={"addressing_style": "path"} if settings.s3_use_path_style else {}
+        )

         self.client = boto3.client(
             "s3",
@@ -44,7 +73,9 @@ class S3Storage:
         # Store active multipart uploads for resumable support
         self._active_uploads: Dict[str, Dict[str, Any]] = {}

-    def store(self, file: BinaryIO, content_length: Optional[int] = None) -> StorageResult:
+    def store(
+        self, file: BinaryIO, content_length: Optional[int] = None
+    ) -> StorageResult:
         """
         Store a file and return StorageResult with all checksums.
         Content-addressable: if the file already exists, just return the hash.
@@ -57,25 +88,54 @@ class S3Storage:
         return self._store_multipart(file, content_length)

     def _store_simple(self, file: BinaryIO) -> StorageResult:
-        """Store a small file using simple put_object"""
-        # Read file and compute all hashes
-        content = file.read()
-        sha256_hash = hashlib.sha256(content).hexdigest()
-        md5_hash = hashlib.md5(content).hexdigest()
-        sha1_hash = hashlib.sha1(content).hexdigest()
-        size = len(content)
+        """
+        Store a small file using simple put_object.

-        # Check if already exists
+        Raises:
+            HashComputationError: If hash computation fails
+            S3ExistenceCheckError: If S3 existence check fails after retries
+            S3UploadError: If S3 upload fails
+        """
+        # Read file and compute all hashes with error handling
+        try:
+            content = file.read()
+            if not content:
+                raise HashComputationError("Empty file content")
+
+            sha256_hash = hashlib.sha256(content).hexdigest()
+            md5_hash = hashlib.md5(content).hexdigest()
+            sha1_hash = hashlib.sha1(content).hexdigest()
+            size = len(content)
+        except HashComputationError:
+            raise
+        except Exception as e:
+            logger.error(f"Hash computation failed: {e}")
+            raise HashComputationError(f"Failed to compute hash: {e}") from e
+
+        # Check if already exists (with retry logic)
         s3_key = f"fruits/{sha256_hash[:2]}/{sha256_hash[2:4]}/{sha256_hash}"
         s3_etag = None

-        if not self._exists(s3_key):
-            response = self.client.put_object(
-                Bucket=self.bucket,
-                Key=s3_key,
-                Body=content,
-            )
-            s3_etag = response.get("ETag", "").strip('"')
+        try:
+            exists = self._exists(s3_key)
+        except S3ExistenceCheckError:
+            # Re-raise the specific error
+            raise
+        except Exception as e:
+            logger.error(f"Unexpected error during S3 existence check: {e}")
+            raise S3ExistenceCheckError(f"Failed to check S3 existence: {e}") from e
+
+        if not exists:
+            try:
+                response = self.client.put_object(
+                    Bucket=self.bucket,
+                    Key=s3_key,
+                    Body=content,
+                )
+                s3_etag = response.get("ETag", "").strip('"')
+            except ClientError as e:
+                logger.error(f"S3 upload failed: {e}")
+                raise S3UploadError(f"Failed to upload to S3: {e}") from e
         else:
             # Get existing ETag
             obj_info = self.get_object_info(s3_key)
@@ -92,30 +152,55 @@ class S3Storage:
         )

     def _store_multipart(self, file: BinaryIO, content_length: int) -> StorageResult:
-        """Store a large file using S3 multipart upload with streaming hash computation"""
+        """
+        Store a large file using S3 multipart upload with streaming hash computation.
+
+        Raises:
+            HashComputationError: If hash computation fails
+            S3ExistenceCheckError: If S3 existence check fails after retries
+            S3UploadError: If S3 upload fails
+        """
         # First pass: compute all hashes by streaming through file
-        sha256_hasher = hashlib.sha256()
-        md5_hasher = hashlib.md5()
-        sha1_hasher = hashlib.sha1()
-        size = 0
+        try:
+            sha256_hasher = hashlib.sha256()
+            md5_hasher = hashlib.md5()
+            sha1_hasher = hashlib.sha1()
+            size = 0

-        # Read file in chunks to compute hashes
-        while True:
-            chunk = file.read(HASH_CHUNK_SIZE)
-            if not chunk:
-                break
-            sha256_hasher.update(chunk)
-            md5_hasher.update(chunk)
-            sha1_hasher.update(chunk)
-            size += len(chunk)
+            # Read file in chunks to compute hashes
+            while True:
+                chunk = file.read(HASH_CHUNK_SIZE)
+                if not chunk:
+                    break
+                sha256_hasher.update(chunk)
+                md5_hasher.update(chunk)
+                sha1_hasher.update(chunk)
+                size += len(chunk)

+            if size == 0:
+                raise HashComputationError("Empty file content")
+
+            sha256_hash = sha256_hasher.hexdigest()
+            md5_hash = md5_hasher.hexdigest()
+            sha1_hash = sha1_hasher.hexdigest()
+        except HashComputationError:
+            raise
+        except Exception as e:
+            logger.error(f"Hash computation failed for multipart upload: {e}")
+            raise HashComputationError(f"Failed to compute hash: {e}") from e
+
-        sha256_hash = sha256_hasher.hexdigest()
-        md5_hash = md5_hasher.hexdigest()
-        sha1_hash = sha1_hasher.hexdigest()
         s3_key = f"fruits/{sha256_hash[:2]}/{sha256_hash[2:4]}/{sha256_hash}"

-        # Check if already exists (deduplication)
-        if self._exists(s3_key):
+        # Check if already exists (deduplication) with retry logic
+        try:
+            exists = self._exists(s3_key)
+        except S3ExistenceCheckError:
+            raise
+        except Exception as e:
+            logger.error(f"Unexpected error during S3 existence check: {e}")
+            raise S3ExistenceCheckError(f"Failed to check S3 existence: {e}") from e
+
+        if exists:
             obj_info = self.get_object_info(s3_key)
             s3_etag = obj_info.get("etag", "").strip('"') if obj_info else None
             return StorageResult(
@@ -150,10 +235,12 @@ class S3Storage:
                 PartNumber=part_number,
                 Body=chunk,
             )
-            parts.append({
-                "PartNumber": part_number,
-                "ETag": response["ETag"],
-            })
+            parts.append(
+                {
+                    "PartNumber": part_number,
+                    "ETag": response["ETag"],
+                }
+            )
             part_number += 1

         # Complete multipart upload
@@ -226,7 +313,9 @@ class S3Storage:
         # Upload based on size
         if size < MULTIPART_THRESHOLD:
             content = b"".join(all_chunks)
-            response = self.client.put_object(Bucket=self.bucket, Key=s3_key, Body=content)
+            response = self.client.put_object(
+                Bucket=self.bucket, Key=s3_key, Body=content
+            )
             s3_etag = response.get("ETag", "").strip('"')
         else:
             # Use multipart for large files
@@ -251,10 +340,12 @@ class S3Storage:
                     PartNumber=part_number,
                     Body=part_data,
                 )
-                parts.append({
-                    "PartNumber": part_number,
-                    "ETag": response["ETag"],
-                })
+                parts.append(
+                    {
+                        "PartNumber": part_number,
+                        "ETag": response["ETag"],
+                    }
+                )
                 part_number += 1

             # Upload remaining buffer
@@ -266,10 +357,12 @@ class S3Storage:
                     PartNumber=part_number,
                     Body=buffer,
                 )
-                parts.append({
-                    "PartNumber": part_number,
-                    "ETag": response["ETag"],
-                })
+                parts.append(
+                    {
+                        "PartNumber": part_number,
+                        "ETag": response["ETag"],
+                    }
+                )

             complete_response = self.client.complete_multipart_upload(
                 Bucket=self.bucket,
@@ -326,7 +419,9 @@ class S3Storage:
         self._active_uploads[upload_id] = session
         return session

-    def upload_part(self, upload_id: str, part_number: int, data: bytes) -> Dict[str, Any]:
+    def upload_part(
+        self, upload_id: str, part_number: int, data: bytes
+    ) -> Dict[str, Any]:
         """
         Upload a part for a resumable upload.
         Returns part info including ETag.
@@ -434,13 +529,50 @@ class S3Storage:
         except ClientError:
             return None

-    def _exists(self, s3_key: str) -> bool:
-        """Check if an object exists"""
-        try:
-            self.client.head_object(Bucket=self.bucket, Key=s3_key)
-            return True
-        except ClientError:
-            return False
+    def _exists(self, s3_key: str, retry: bool = True) -> bool:
+        """
+        Check if an object exists with optional retry logic.
+
+        Args:
+            s3_key: The S3 key to check
+            retry: Whether to retry on transient failures (default: True)
+
+        Returns:
+            True if object exists, False otherwise
+
+        Raises:
+            S3ExistenceCheckError: If all retries fail due to non-404 errors
+        """
+        import time
+
+        max_retries = MAX_EXISTENCE_CHECK_RETRIES if retry else 1
+        last_error = None
+
+        for attempt in range(max_retries):
+            try:
+                self.client.head_object(Bucket=self.bucket, Key=s3_key)
+                return True
+            except ClientError as e:
+                error_code = e.response.get("Error", {}).get("Code", "")
+                # 404 means object doesn't exist - not an error
+                if error_code in ("404", "NoSuchKey"):
+                    return False
+
+                # For other errors, retry
+                last_error = e
+                if attempt < max_retries - 1:
+                    logger.warning(
+                        f"S3 existence check failed (attempt {attempt + 1}/{max_retries}): {e}"
+                    )
+                    time.sleep(0.1 * (attempt + 1))  # Linear backoff between retries
+
+        # All retries failed
+        logger.error(
+            f"S3 existence check failed after {max_retries} attempts: {last_error}"
+        )
+        raise S3ExistenceCheckError(
+            f"Failed to check S3 object existence after {max_retries} attempts: {last_error}"
+        )

     def delete(self, s3_key: str) -> bool:
         """Delete an object"""