Add ref_count management for deletions with atomic operations and error handling
.gitignore (vendored, 4 lines changed)
@@ -37,6 +37,10 @@ Thumbs.db
# Build
/build/
/dist/
frontend/dist/

# Node
node_modules/

# Local config overrides
config.local.yaml

@@ -13,6 +13,8 @@ kics:
hadolint:
  allow_failure: true

secrets:
  allow_failure: true

# Run Python tests
python_tests:

CHANGELOG.md (37 lines changed)
@@ -6,8 +6,45 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]
### Added
- Added `StorageBackend` protocol/interface for backend-agnostic storage (#33)
- Added `health_check()` method to storage backend with `/health` endpoint integration (#33)
- Added `verify_integrity()` method for post-upload hash validation (#33)
- Added S3 configuration options: `s3_verify_ssl`, `s3_connect_timeout`, `s3_read_timeout`, `s3_max_retries` (#33)
- Added `S3StorageUnavailableError` and `HashCollisionError` exception types (#33)
- Added hash collision detection by comparing file sizes during deduplication (#33)
- Added garbage collection endpoint `POST /api/v1/admin/garbage-collect` for orphaned artifacts (#36)
- Added orphaned artifacts listing endpoint `GET /api/v1/admin/orphaned-artifacts` (#36)
- Added global storage statistics endpoint `GET /api/v1/stats` (#34)
- Added storage breakdown endpoint `GET /api/v1/stats/storage` (#34)
- Added deduplication metrics endpoint `GET /api/v1/stats/deduplication` (#34)
- Added per-project statistics endpoint `GET /api/v1/projects/{project}/stats` (#34)
- Added per-package statistics endpoint `GET /api/v1/project/{project}/packages/{package}/stats` (#34)
- Added per-artifact statistics endpoint `GET /api/v1/artifact/{id}/stats` (#34)
- Added cross-project deduplication endpoint `GET /api/v1/stats/cross-project` (#34)
- Added timeline statistics endpoint `GET /api/v1/stats/timeline` with daily/weekly/monthly periods (#34)
- Added stats export endpoint `GET /api/v1/stats/export` with JSON/CSV formats (#34)
- Added summary report endpoint `GET /api/v1/stats/report` with markdown/JSON formats (#34)
- Added Dashboard page at `/dashboard` with storage and deduplication visualizations (#34)
- Added pytest infrastructure with mock S3 client for unit testing (#35)
- Added unit tests for SHA256 hash calculation (#35)
- Added unit tests for duplicate detection and deduplication behavior (#35)
- Added integration tests for upload scenarios and ref_count management (#35)
- Added integration tests for S3 verification and failure cleanup (#35)
- Added integration tests for all stats endpoints (#35)
- Added integration tests for cascade deletion ref_count behavior (package/project delete) (#35)
- Added integration tests for tag update ref_count adjustments (#35)
- Added integration tests for garbage collection endpoints (#35)
- Added integration tests for file size validation (#35)
- Added test dependencies to requirements.txt (pytest, pytest-asyncio, pytest-cov, httpx, moto) (#35)
- Added `ORCHARD_MAX_FILE_SIZE` config option (default: 10GB) for upload size limits (#37)
- Added `ORCHARD_MIN_FILE_SIZE` config option (default: 1 byte, rejects empty files) (#37)
- Added file size validation to upload and resumable upload endpoints (#37)
- Added comprehensive deduplication design document (`docs/design/deduplication-design.md`) (#37)
### Fixed
- Fixed Helm chart `minio.ingress` conflicting with Bitnami MinIO subchart by renaming to `minioIngress` (#48)
- Fixed JSON report serialization error for Decimal types in `GET /api/v1/stats/report` (#34)
- Fixed resumable upload double-counting ref_count when tag provided (removed manual increment, SQL triggers handle it) (#35)

## [0.3.0] - 2025-12-15
### Changed

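The statistics and garbage-collection endpoints listed under Added return the response models defined later in this diff (`StorageStatsResponse`, `DeduplicationStatsResponse`, `GarbageCollectionResponse`). A minimal client sketch, assuming a local server on port 8080 and ignoring any authentication or query parameters the admin routes may require; only fields documented in those schemas are read:

```python
# Hedged client sketch for the new stats/GC endpoints (assumes a local dev server).
import httpx

BASE = "http://localhost:8080"  # assumption: not specified in this commit

with httpx.Client(base_url=BASE, timeout=30.0) as client:
    stats = client.get("/api/v1/stats").json()
    print(stats["total_artifacts"], stats["storage_saved_bytes"])

    dedup = client.get("/api/v1/stats/deduplication").json()
    print(f"dedup savings: {dedup['savings_percentage']:.1f}%")

    # Garbage-collect orphaned artifacts (ref_count == 0); the response reports
    # how many artifacts were removed and how many bytes were freed.
    gc = client.post("/api/v1/admin/garbage-collect").json()
    print(gc["artifacts_deleted"], gc["bytes_freed"], gc["dry_run"])
```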
@@ -22,7 +22,9 @@ class Settings(BaseSettings):
    database_pool_size: int = 5  # Number of connections to keep open
    database_max_overflow: int = 10  # Max additional connections beyond pool_size
    database_pool_timeout: int = 30  # Seconds to wait for a connection from pool
    database_pool_recycle: int = 1800  # Recycle connections after this many seconds (30 min)
    database_pool_recycle: int = (
        1800  # Recycle connections after this many seconds (30 min)
    )

    # S3
    s3_endpoint: str = ""
@@ -31,10 +33,20 @@ class Settings(BaseSettings):
    s3_access_key_id: str = ""
    s3_secret_access_key: str = ""
    s3_use_path_style: bool = True
    s3_verify_ssl: bool = True  # Set to False for self-signed certs (dev only)
    s3_connect_timeout: int = 10  # Connection timeout in seconds
    s3_read_timeout: int = 60  # Read timeout in seconds
    s3_max_retries: int = 3  # Max retry attempts for transient failures

    # Upload settings
    max_file_size: int = 10 * 1024 * 1024 * 1024  # 10GB default max file size
    min_file_size: int = 1  # Minimum 1 byte (empty files rejected)

    # Download settings
    download_mode: str = "presigned"  # "presigned", "redirect", or "proxy"
    presigned_url_expiry: int = 3600  # Presigned URL expiry in seconds (default: 1 hour)
    presigned_url_expiry: int = (
        3600  # Presigned URL expiry in seconds (default: 1 hour)
    )

    @property
    def database_url(self) -> str:

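These settings are read by pydantic-settings. The changelog entry for `ORCHARD_MAX_FILE_SIZE` and the `ORCHARD_S3_*` variables in the test fixtures later in this commit suggest every field maps to an `ORCHARD_`-prefixed environment variable; a sketch of overriding the new S3 options for a dev environment, under that assumption:

```python
# Illustrative only: assumes each Settings field maps to an ORCHARD_-prefixed
# environment variable, as conftest.py's ORCHARD_S3_* variables suggest.
import os

os.environ["ORCHARD_S3_VERIFY_SSL"] = "false"      # self-signed MinIO cert (dev only)
os.environ["ORCHARD_S3_CONNECT_TIMEOUT"] = "5"     # fail fast on a bad endpoint
os.environ["ORCHARD_S3_MAX_RETRIES"] = "5"         # more retries on flaky networks
os.environ["ORCHARD_MAX_FILE_SIZE"] = str(1 * 1024 * 1024 * 1024)  # cap uploads at 1GB

# Import after the overrides are in place (get_settings may cache its result).
from app.config import get_settings

settings = get_settings()
print(settings.s3_max_retries, settings.max_file_size)
```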
@@ -1,6 +1,6 @@
|
||||
from datetime import datetime
|
||||
from typing import Optional, List, Dict, Any, Generic, TypeVar
|
||||
from pydantic import BaseModel
|
||||
from pydantic import BaseModel, field_validator
|
||||
from uuid import UUID
|
||||
|
||||
T = TypeVar("T")
|
||||
@@ -40,8 +40,28 @@ class ProjectResponse(BaseModel):
|
||||
|
||||
|
||||
# Package format and platform enums
|
||||
PACKAGE_FORMATS = ["generic", "npm", "pypi", "docker", "deb", "rpm", "maven", "nuget", "helm"]
|
||||
PACKAGE_PLATFORMS = ["any", "linux", "darwin", "windows", "linux-amd64", "linux-arm64", "darwin-amd64", "darwin-arm64", "windows-amd64"]
|
||||
PACKAGE_FORMATS = [
|
||||
"generic",
|
||||
"npm",
|
||||
"pypi",
|
||||
"docker",
|
||||
"deb",
|
||||
"rpm",
|
||||
"maven",
|
||||
"nuget",
|
||||
"helm",
|
||||
]
|
||||
PACKAGE_PLATFORMS = [
|
||||
"any",
|
||||
"linux",
|
||||
"darwin",
|
||||
"windows",
|
||||
"linux-amd64",
|
||||
"linux-arm64",
|
||||
"darwin-amd64",
|
||||
"darwin-arm64",
|
||||
"windows-amd64",
|
||||
]
|
||||
|
||||
|
||||
# Package schemas
|
||||
@@ -68,6 +88,7 @@ class PackageResponse(BaseModel):
|
||||
|
||||
class TagSummary(BaseModel):
|
||||
"""Lightweight tag info for embedding in package responses"""
|
||||
|
||||
name: str
|
||||
artifact_id: str
|
||||
created_at: datetime
|
||||
@@ -75,6 +96,7 @@ class TagSummary(BaseModel):
|
||||
|
||||
class PackageDetailResponse(BaseModel):
|
||||
"""Package with aggregated metadata"""
|
||||
|
||||
id: UUID
|
||||
project_id: UUID
|
||||
name: str
|
||||
@@ -135,6 +157,7 @@ class TagResponse(BaseModel):
|
||||
|
||||
class TagDetailResponse(BaseModel):
|
||||
"""Tag with embedded artifact metadata"""
|
||||
|
||||
id: UUID
|
||||
package_id: UUID
|
||||
name: str
|
||||
@@ -154,6 +177,7 @@ class TagDetailResponse(BaseModel):
|
||||
|
||||
class TagHistoryResponse(BaseModel):
|
||||
"""History entry for tag changes"""
|
||||
|
||||
id: UUID
|
||||
tag_id: UUID
|
||||
old_artifact_id: Optional[str]
|
||||
@@ -167,6 +191,7 @@ class TagHistoryResponse(BaseModel):
|
||||
|
||||
class ArtifactTagInfo(BaseModel):
|
||||
"""Tag info for embedding in artifact responses"""
|
||||
|
||||
id: UUID
|
||||
name: str
|
||||
package_id: UUID
|
||||
@@ -176,6 +201,7 @@ class ArtifactTagInfo(BaseModel):
|
||||
|
||||
class ArtifactDetailResponse(BaseModel):
|
||||
"""Artifact with list of tags/packages referencing it"""
|
||||
|
||||
id: str
|
||||
sha256: str # Explicit SHA256 field (same as id)
|
||||
size: int
|
||||
@@ -196,6 +222,7 @@ class ArtifactDetailResponse(BaseModel):
|
||||
|
||||
class PackageArtifactResponse(BaseModel):
|
||||
"""Artifact with tags for package artifact listing"""
|
||||
|
||||
id: str
|
||||
sha256: str # Explicit SHA256 field (same as id)
|
||||
size: int
|
||||
@@ -226,20 +253,35 @@ class UploadResponse(BaseModel):
|
||||
s3_etag: Optional[str] = None
|
||||
format_metadata: Optional[Dict[str, Any]] = None
|
||||
deduplicated: bool = False
|
||||
ref_count: int = 1 # Current reference count after this upload
|
||||
|
||||
|
||||
# Resumable upload schemas
|
||||
class ResumableUploadInitRequest(BaseModel):
|
||||
"""Request to initiate a resumable upload"""
|
||||
|
||||
expected_hash: str # SHA256 hash of the file (client must compute)
|
||||
filename: str
|
||||
content_type: Optional[str] = None
|
||||
size: int
|
||||
tag: Optional[str] = None
|
||||
|
||||
@field_validator("expected_hash")
|
||||
@classmethod
|
||||
def validate_sha256_hash(cls, v: str) -> str:
|
||||
"""Validate that expected_hash is a valid 64-character lowercase hex SHA256 hash."""
|
||||
import re
|
||||
|
||||
if not re.match(r"^[a-f0-9]{64}$", v.lower()):
|
||||
raise ValueError(
|
||||
"expected_hash must be a valid 64-character lowercase hexadecimal SHA256 hash"
|
||||
)
|
||||
return v.lower() # Normalize to lowercase
|
||||
|
||||
|
||||
class ResumableUploadInitResponse(BaseModel):
|
||||
"""Response from initiating a resumable upload"""
|
||||
|
||||
upload_id: Optional[str] # None if file already exists
|
||||
already_exists: bool
|
||||
artifact_id: Optional[str] = None # Set if already_exists is True
|
||||
@@ -248,17 +290,20 @@ class ResumableUploadInitResponse(BaseModel):
|
||||
|
||||
class ResumableUploadPartResponse(BaseModel):
|
||||
"""Response from uploading a part"""
|
||||
|
||||
part_number: int
|
||||
etag: str
|
||||
|
||||
|
||||
class ResumableUploadCompleteRequest(BaseModel):
|
||||
"""Request to complete a resumable upload"""
|
||||
|
||||
tag: Optional[str] = None
|
||||
|
||||
|
||||
class ResumableUploadCompleteResponse(BaseModel):
|
||||
"""Response from completing a resumable upload"""
|
||||
|
||||
artifact_id: str
|
||||
size: int
|
||||
project: str
|
||||
@@ -268,6 +313,7 @@ class ResumableUploadCompleteResponse(BaseModel):
|
||||
|
||||
class ResumableUploadStatusResponse(BaseModel):
|
||||
"""Status of a resumable upload"""
|
||||
|
||||
upload_id: str
|
||||
uploaded_parts: List[int]
|
||||
total_uploaded_bytes: int
|
||||
@@ -288,6 +334,7 @@ class ConsumerResponse(BaseModel):
|
||||
# Global search schemas
|
||||
class SearchResultProject(BaseModel):
|
||||
"""Project result for global search"""
|
||||
|
||||
id: UUID
|
||||
name: str
|
||||
description: Optional[str]
|
||||
@@ -299,6 +346,7 @@ class SearchResultProject(BaseModel):
|
||||
|
||||
class SearchResultPackage(BaseModel):
|
||||
"""Package result for global search"""
|
||||
|
||||
id: UUID
|
||||
project_id: UUID
|
||||
project_name: str
|
||||
@@ -312,6 +360,7 @@ class SearchResultPackage(BaseModel):
|
||||
|
||||
class SearchResultArtifact(BaseModel):
|
||||
"""Artifact/tag result for global search"""
|
||||
|
||||
tag_id: UUID
|
||||
tag_name: str
|
||||
artifact_id: str
|
||||
@@ -323,6 +372,7 @@ class SearchResultArtifact(BaseModel):
|
||||
|
||||
class GlobalSearchResponse(BaseModel):
|
||||
"""Combined search results across all entity types"""
|
||||
|
||||
query: str
|
||||
projects: List[SearchResultProject]
|
||||
packages: List[SearchResultPackage]
|
||||
@@ -333,6 +383,7 @@ class GlobalSearchResponse(BaseModel):
|
||||
# Presigned URL response
|
||||
class PresignedUrlResponse(BaseModel):
|
||||
"""Response containing a presigned URL for direct S3 download"""
|
||||
|
||||
url: str
|
||||
expires_at: datetime
|
||||
method: str = "GET"
|
||||
@@ -348,3 +399,131 @@ class PresignedUrlResponse(BaseModel):
|
||||
class HealthResponse(BaseModel):
|
||||
status: str
|
||||
version: str = "1.0.0"
|
||||
storage_healthy: Optional[bool] = None
|
||||
database_healthy: Optional[bool] = None
|
||||
|
||||
|
||||
# Garbage collection schemas
|
||||
class GarbageCollectionResponse(BaseModel):
|
||||
"""Response from garbage collection operation"""
|
||||
|
||||
artifacts_deleted: int
|
||||
bytes_freed: int
|
||||
artifact_ids: List[str]
|
||||
dry_run: bool
|
||||
|
||||
|
||||
class OrphanedArtifactResponse(BaseModel):
|
||||
"""Information about an orphaned artifact"""
|
||||
|
||||
id: str
|
||||
size: int
|
||||
created_at: datetime
|
||||
created_by: str
|
||||
original_name: Optional[str]
|
||||
|
||||
|
||||
# Storage statistics schemas
|
||||
class StorageStatsResponse(BaseModel):
|
||||
"""Global storage statistics"""
|
||||
|
||||
total_artifacts: int
|
||||
total_size_bytes: int
|
||||
unique_artifacts: int # Artifacts with ref_count > 0
|
||||
orphaned_artifacts: int # Artifacts with ref_count = 0
|
||||
orphaned_size_bytes: int
|
||||
total_uploads: int
|
||||
deduplicated_uploads: int
|
||||
deduplication_ratio: (
|
||||
float # total_uploads / unique_artifacts (if > 1, deduplication is working)
|
||||
)
|
||||
storage_saved_bytes: int # Bytes saved through deduplication
|
||||
|
||||
|
||||
class DeduplicationStatsResponse(BaseModel):
|
||||
"""Deduplication effectiveness statistics"""
|
||||
|
||||
total_logical_bytes: (
|
||||
int # Sum of all upload sizes (what would be stored without dedup)
|
||||
)
|
||||
total_physical_bytes: int # Actual storage used
|
||||
bytes_saved: int
|
||||
savings_percentage: float
|
||||
total_uploads: int
|
||||
unique_artifacts: int
|
||||
duplicate_uploads: int
|
||||
average_ref_count: float
|
||||
max_ref_count: int
|
||||
most_referenced_artifacts: List[Dict[str, Any]] # Top N most referenced
|
||||
|
||||
|
||||
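The ratio fields above are simple derived quantities; a quick worked example (numbers invented purely for illustration):

```python
# Worked example of the deduplication statistics fields (illustrative numbers only).
total_uploads = 10          # clients uploaded 10 files
unique_artifacts = 4        # only 4 distinct SHA256 hashes among them

deduplication_ratio = total_uploads / unique_artifacts   # 2.5 -> dedup is working

# Per-artifact savings: an artifact referenced 3 times occupies storage only once.
size = 100 * 1024 * 1024    # 100 MB
ref_count = 3
storage_savings = (ref_count - 1) * size                 # 200 MB not re-stored
```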
class ProjectStatsResponse(BaseModel):
|
||||
"""Per-project statistics"""
|
||||
|
||||
project_id: str
|
||||
project_name: str
|
||||
package_count: int
|
||||
tag_count: int
|
||||
artifact_count: int
|
||||
total_size_bytes: int
|
||||
upload_count: int
|
||||
deduplicated_uploads: int
|
||||
storage_saved_bytes: int = 0 # Bytes saved through deduplication
|
||||
deduplication_ratio: float = 1.0 # upload_count / artifact_count
|
||||
|
||||
|
||||
class PackageStatsResponse(BaseModel):
|
||||
"""Per-package statistics"""
|
||||
|
||||
package_id: str
|
||||
package_name: str
|
||||
project_name: str
|
||||
tag_count: int
|
||||
artifact_count: int
|
||||
total_size_bytes: int
|
||||
upload_count: int
|
||||
deduplicated_uploads: int
|
||||
storage_saved_bytes: int = 0
|
||||
deduplication_ratio: float = 1.0
|
||||
|
||||
|
||||
class ArtifactStatsResponse(BaseModel):
|
||||
"""Per-artifact reference statistics"""
|
||||
|
||||
artifact_id: str
|
||||
sha256: str
|
||||
size: int
|
||||
ref_count: int
|
||||
storage_savings: int # (ref_count - 1) * size
|
||||
tags: List[Dict[str, Any]] # Tags referencing this artifact
|
||||
projects: List[str] # Projects using this artifact
|
||||
packages: List[str] # Packages using this artifact
|
||||
first_uploaded: Optional[datetime] = None
|
||||
last_referenced: Optional[datetime] = None
|
||||
|
||||
|
||||
class CrossProjectDeduplicationResponse(BaseModel):
|
||||
"""Cross-project deduplication statistics"""
|
||||
|
||||
shared_artifacts_count: int # Artifacts used in multiple projects
|
||||
total_cross_project_savings: int # Bytes saved by cross-project sharing
|
||||
shared_artifacts: List[Dict[str, Any]] # Details of shared artifacts
|
||||
|
||||
|
||||
class TimeBasedStatsResponse(BaseModel):
|
||||
"""Time-based deduplication statistics"""
|
||||
|
||||
period: str # "daily", "weekly", "monthly"
|
||||
start_date: datetime
|
||||
end_date: datetime
|
||||
data_points: List[
|
||||
Dict[str, Any]
|
||||
] # List of {date, uploads, unique, duplicated, bytes_saved}
|
||||
|
||||
|
||||
class StatsReportResponse(BaseModel):
|
||||
"""Summary report in various formats"""
|
||||
|
||||
format: str # "json", "csv", "markdown"
|
||||
generated_at: datetime
|
||||
content: str # The report content
|
||||
|
||||
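The `expected_hash` validator above rejects anything that is not 64 hex characters and normalizes the value to lowercase. A small sketch of how that behaves when constructing the request model (the hash value is the pre-computed `Hello, World!` hash from the test fixtures later in this commit; the `app.schemas` module path is an assumption, since the file name is not visible in this excerpt):

```python
# Sketch of the expected_hash validation in ResumableUploadInitRequest.
from pydantic import ValidationError
from app.schemas import ResumableUploadInitRequest  # assumption: module path

HELLO_SHA256 = "dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f"

req = ResumableUploadInitRequest(
    expected_hash=HELLO_SHA256.upper(),  # mixed/upper case is accepted...
    filename="hello.txt",
    size=13,
)
assert req.expected_hash == HELLO_SHA256  # ...and normalized to lowercase

try:
    ResumableUploadInitRequest(expected_hash="not-a-hash", filename="x", size=1)
except ValidationError:
    pass  # anything that is not 64 hex characters is rejected
```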
@@ -1,25 +1,201 @@
|
||||
import hashlib
|
||||
import logging
|
||||
from typing import BinaryIO, Tuple, Optional, Dict, Any, Generator, NamedTuple
|
||||
from typing import (
|
||||
BinaryIO,
|
||||
Tuple,
|
||||
Optional,
|
||||
Dict,
|
||||
Any,
|
||||
Generator,
|
||||
NamedTuple,
|
||||
Protocol,
|
||||
runtime_checkable,
|
||||
)
|
||||
import boto3
|
||||
from botocore.config import Config
|
||||
from botocore.exceptions import ClientError
|
||||
from botocore.exceptions import (
|
||||
ClientError,
|
||||
ConnectionError as BotoConnectionError,
|
||||
EndpointConnectionError,
|
||||
ReadTimeoutError,
|
||||
ConnectTimeoutError,
|
||||
)
|
||||
|
||||
from .config import get_settings
|
||||
|
||||
settings = get_settings()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Storage Backend Protocol/Interface (ISSUE 33)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class StorageBackend(Protocol):
|
||||
"""
|
||||
Abstract protocol defining the interface for storage backends.
|
||||
|
||||
All storage implementations (S3, MinIO, future backends) must implement
|
||||
this interface to ensure consistent behavior across the application.
|
||||
|
||||
Note on Deduplication:
|
||||
- This system uses whole-file deduplication based on SHA256 hash
|
||||
- Partial/chunk-level deduplication is NOT supported (out of scope for MVP)
|
||||
- Files with identical content but different metadata are deduplicated
|
||||
"""
|
||||
|
||||
def store(
|
||||
self, file: BinaryIO, content_length: Optional[int] = None
|
||||
) -> "StorageResult":
|
||||
"""
|
||||
Store a file and return StorageResult with all checksums.
|
||||
|
||||
Content-addressable: if the file already exists (by hash), just return
|
||||
the existing hash without uploading again.
|
||||
|
||||
Args:
|
||||
file: File-like object to store
|
||||
content_length: Optional hint for file size (enables multipart upload)
|
||||
|
||||
Returns:
|
||||
StorageResult with sha256, size, s3_key, and optional checksums
|
||||
|
||||
Raises:
|
||||
HashComputationError: If hash computation fails
|
||||
S3ExistenceCheckError: If existence check fails after retries
|
||||
S3UploadError: If upload fails
|
||||
"""
|
||||
...
|
||||
|
||||
def get(self, s3_key: str) -> bytes:
|
||||
"""
|
||||
Retrieve a file by its storage key.
|
||||
|
||||
Args:
|
||||
s3_key: The storage key (path) of the file
|
||||
|
||||
Returns:
|
||||
File content as bytes
|
||||
"""
|
||||
...
|
||||
|
||||
def get_stream(
|
||||
self, s3_key: str, range_header: Optional[str] = None
|
||||
) -> Tuple[Any, int, Optional[str]]:
|
||||
"""
|
||||
Get a streaming response for a file.
|
||||
|
||||
Supports range requests for partial downloads.
|
||||
|
||||
Args:
|
||||
s3_key: The storage key of the file
|
||||
range_header: Optional HTTP Range header value
|
||||
|
||||
Returns:
|
||||
Tuple of (stream, content_length, content_range)
|
||||
"""
|
||||
...
|
||||
|
||||
def delete(self, s3_key: str) -> bool:
|
||||
"""
|
||||
Delete a file from storage.
|
||||
|
||||
Args:
|
||||
s3_key: The storage key of the file to delete
|
||||
|
||||
Returns:
|
||||
True if deleted successfully, False otherwise
|
||||
"""
|
||||
...
|
||||
|
||||
def get_object_info(self, s3_key: str) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Get object metadata without downloading content.
|
||||
|
||||
Args:
|
||||
s3_key: The storage key of the file
|
||||
|
||||
Returns:
|
||||
Dict with size, content_type, last_modified, etag, or None if not found
|
||||
"""
|
||||
...
|
||||
|
||||
def generate_presigned_url(
|
||||
self,
|
||||
s3_key: str,
|
||||
expiry: Optional[int] = None,
|
||||
response_content_type: Optional[str] = None,
|
||||
response_content_disposition: Optional[str] = None,
|
||||
) -> str:
|
||||
"""
|
||||
Generate a presigned URL for downloading an object.
|
||||
|
||||
Args:
|
||||
s3_key: The storage key of the file
|
||||
expiry: URL expiry in seconds
|
||||
response_content_type: Override Content-Type header in response
|
||||
response_content_disposition: Override Content-Disposition header
|
||||
|
||||
Returns:
|
||||
Presigned URL string
|
||||
"""
|
||||
...
|
||||
|
||||
def health_check(self) -> bool:
|
||||
"""
|
||||
Check if the storage backend is healthy and accessible.
|
||||
|
||||
Returns:
|
||||
True if healthy, False otherwise
|
||||
"""
|
||||
...
|
||||
|
||||
|
||||
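Because `StorageBackend` is declared `@runtime_checkable`, any object that provides these methods satisfies the interface structurally; nothing has to inherit from it. A minimal sketch, assuming the `StorageBackend` and `StorageResult` names from this module are in scope (the `InMemoryStorage` class is hypothetical and not part of this commit):

```python
# Hypothetical in-memory backend, used only to illustrate the Protocol contract.
import hashlib
import io
from typing import Any, BinaryIO, Dict, Optional, Tuple


class InMemoryStorage:
    """Toy backend: satisfies StorageBackend structurally, without inheriting from it."""

    def __init__(self) -> None:
        self._blobs: Dict[str, bytes] = {}

    def store(self, file: BinaryIO, content_length: Optional[int] = None) -> "StorageResult":
        content = file.read()
        sha256 = hashlib.sha256(content).hexdigest()
        key = f"fruits/{sha256[:2]}/{sha256[2:4]}/{sha256}"
        self._blobs.setdefault(key, content)  # content-addressable: no-op if already stored
        return StorageResult(sha256=sha256, size=len(content), s3_key=key)

    def get(self, s3_key: str) -> bytes:
        return self._blobs[s3_key]

    def get_stream(self, s3_key: str, range_header: Optional[str] = None) -> Tuple[Any, int, Optional[str]]:
        content = self._blobs[s3_key]
        return io.BytesIO(content), len(content), None

    def delete(self, s3_key: str) -> bool:
        return self._blobs.pop(s3_key, None) is not None

    def get_object_info(self, s3_key: str) -> Optional[Dict[str, Any]]:
        content = self._blobs.get(s3_key)
        return {"size": len(content)} if content is not None else None

    def generate_presigned_url(self, s3_key: str, expiry: Optional[int] = None,
                               response_content_type: Optional[str] = None,
                               response_content_disposition: Optional[str] = None) -> str:
        return f"memory://{s3_key}"

    def health_check(self) -> bool:
        return True


# runtime_checkable only verifies that the method names exist, not their signatures,
# but that is enough for a quick structural sanity check:
assert isinstance(InMemoryStorage(), StorageBackend)
```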
# Threshold for multipart upload (100MB)
|
||||
MULTIPART_THRESHOLD = 100 * 1024 * 1024
|
||||
# Chunk size for multipart upload (10MB)
|
||||
MULTIPART_CHUNK_SIZE = 10 * 1024 * 1024
|
||||
# Chunk size for streaming hash computation
|
||||
HASH_CHUNK_SIZE = 8 * 1024 * 1024
|
||||
# Maximum retries for S3 existence check
|
||||
MAX_EXISTENCE_CHECK_RETRIES = 3
|
||||
|
||||
|
||||
class StorageError(Exception):
|
||||
"""Base exception for storage operations"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class HashComputationError(StorageError):
|
||||
"""Raised when hash computation fails"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class FileSizeExceededError(StorageError):
|
||||
"""Raised when file exceeds maximum size during upload"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class S3ExistenceCheckError(StorageError):
|
||||
"""Raised when S3 existence check fails after retries"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class S3UploadError(StorageError):
|
||||
"""Raised when S3 upload fails"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class StorageResult(NamedTuple):
|
||||
"""Result of storing a file with all computed checksums"""
|
||||
|
||||
sha256: str
|
||||
size: int
|
||||
s3_key: str
|
||||
@@ -28,9 +204,34 @@ class StorageResult(NamedTuple):
|
||||
s3_etag: Optional[str] = None
|
||||
|
||||
|
||||
class S3StorageUnavailableError(StorageError):
|
||||
"""Raised when S3 storage backend is unavailable"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class HashCollisionError(StorageError):
|
||||
"""Raised when a hash collision is detected (extremely rare)"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class S3Storage:
|
||||
def __init__(self):
|
||||
config = Config(s3={"addressing_style": "path"} if settings.s3_use_path_style else {})
|
||||
# Build config with retry and timeout settings
|
||||
s3_config = {}
|
||||
if settings.s3_use_path_style:
|
||||
s3_config["addressing_style"] = "path"
|
||||
|
||||
config = Config(
|
||||
s3=s3_config if s3_config else None,
|
||||
connect_timeout=settings.s3_connect_timeout,
|
||||
read_timeout=settings.s3_read_timeout,
|
||||
retries={
|
||||
"max_attempts": settings.s3_max_retries,
|
||||
"mode": "adaptive", # Adaptive retry mode for better handling
|
||||
},
|
||||
)
|
||||
|
||||
self.client = boto3.client(
|
||||
"s3",
|
||||
@@ -39,12 +240,15 @@ class S3Storage:
|
||||
aws_access_key_id=settings.s3_access_key_id,
|
||||
aws_secret_access_key=settings.s3_secret_access_key,
|
||||
config=config,
|
||||
verify=settings.s3_verify_ssl, # SSL/TLS verification
|
||||
)
|
||||
self.bucket = settings.s3_bucket
|
||||
# Store active multipart uploads for resumable support
|
||||
self._active_uploads: Dict[str, Dict[str, Any]] = {}
|
||||
|
||||
def store(self, file: BinaryIO, content_length: Optional[int] = None) -> StorageResult:
|
||||
def store(
|
||||
self, file: BinaryIO, content_length: Optional[int] = None
|
||||
) -> StorageResult:
|
||||
"""
|
||||
Store a file and return StorageResult with all checksums.
|
||||
Content-addressable: if the file already exists, just return the hash.
|
||||
@@ -57,30 +261,91 @@ class S3Storage:
|
||||
return self._store_multipart(file, content_length)
|
||||
|
||||
def _store_simple(self, file: BinaryIO) -> StorageResult:
|
||||
"""Store a small file using simple put_object"""
|
||||
# Read file and compute all hashes
|
||||
"""
|
||||
Store a small file using simple put_object.
|
||||
|
||||
Raises:
|
||||
HashComputationError: If hash computation fails
|
||||
FileSizeExceededError: If file exceeds maximum size
|
||||
S3ExistenceCheckError: If S3 existence check fails after retries
|
||||
S3UploadError: If S3 upload fails
|
||||
"""
|
||||
# Read file and compute all hashes with error handling
|
||||
try:
|
||||
content = file.read()
|
||||
if not content:
|
||||
raise HashComputationError("Empty file content")
|
||||
|
||||
size = len(content)
|
||||
|
||||
# Enforce file size limit (protection against Content-Length spoofing)
|
||||
if size > settings.max_file_size:
|
||||
raise FileSizeExceededError(
|
||||
f"File size {size} exceeds maximum {settings.max_file_size}"
|
||||
)
|
||||
|
||||
sha256_hash = hashlib.sha256(content).hexdigest()
|
||||
md5_hash = hashlib.md5(content).hexdigest()
|
||||
sha1_hash = hashlib.sha1(content).hexdigest()
|
||||
size = len(content)
|
||||
except (HashComputationError, FileSizeExceededError):
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Hash computation failed: {e}")
|
||||
raise HashComputationError(f"Failed to compute hash: {e}") from e
|
||||
|
||||
# Check if already exists
|
||||
# Check if already exists (with retry logic)
|
||||
s3_key = f"fruits/{sha256_hash[:2]}/{sha256_hash[2:4]}/{sha256_hash}"
|
||||
s3_etag = None
|
||||
|
||||
if not self._exists(s3_key):
|
||||
try:
|
||||
exists = self._exists(s3_key)
|
||||
except S3ExistenceCheckError:
|
||||
# Re-raise the specific error
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error during S3 existence check: {e}")
|
||||
raise S3ExistenceCheckError(f"Failed to check S3 existence: {e}") from e
|
||||
|
||||
if not exists:
|
||||
try:
|
||||
response = self.client.put_object(
|
||||
Bucket=self.bucket,
|
||||
Key=s3_key,
|
||||
Body=content,
|
||||
)
|
||||
s3_etag = response.get("ETag", "").strip('"')
|
||||
except (EndpointConnectionError, BotoConnectionError) as e:
|
||||
logger.error(f"S3 storage unavailable: {e}")
|
||||
raise S3StorageUnavailableError(
|
||||
f"Storage backend unavailable: {e}"
|
||||
) from e
|
||||
except (ReadTimeoutError, ConnectTimeoutError) as e:
|
||||
logger.error(f"S3 operation timed out: {e}")
|
||||
raise S3UploadError(f"Upload timed out: {e}") from e
|
||||
except ClientError as e:
|
||||
error_code = e.response.get("Error", {}).get("Code", "")
|
||||
if error_code == "ServiceUnavailable":
|
||||
logger.error(f"S3 service unavailable: {e}")
|
||||
raise S3StorageUnavailableError(
|
||||
f"Storage service unavailable: {e}"
|
||||
) from e
|
||||
logger.error(f"S3 upload failed: {e}")
|
||||
raise S3UploadError(f"Failed to upload to S3: {e}") from e
|
||||
else:
|
||||
# Get existing ETag
|
||||
# Get existing ETag and verify integrity (detect potential hash collision)
|
||||
obj_info = self.get_object_info(s3_key)
|
||||
if obj_info:
|
||||
s3_etag = obj_info.get("etag", "").strip('"')
|
||||
# Check for hash collision by comparing size
|
||||
existing_size = obj_info.get("size", 0)
|
||||
if existing_size != size:
|
||||
logger.critical(
|
||||
f"HASH COLLISION DETECTED! Hash {sha256_hash} has size mismatch: "
|
||||
f"existing={existing_size}, new={size}. This is extremely rare."
|
||||
)
|
||||
raise HashCollisionError(
|
||||
f"Hash collision detected for {sha256_hash}: size mismatch"
|
||||
)
|
||||
|
||||
return StorageResult(
|
||||
sha256=sha256_hash,
|
||||
@@ -92,8 +357,17 @@ class S3Storage:
|
||||
)
|
||||
|
||||
def _store_multipart(self, file: BinaryIO, content_length: int) -> StorageResult:
|
||||
"""Store a large file using S3 multipart upload with streaming hash computation"""
|
||||
"""
|
||||
Store a large file using S3 multipart upload with streaming hash computation.
|
||||
|
||||
Raises:
|
||||
HashComputationError: If hash computation fails
|
||||
FileSizeExceededError: If file exceeds maximum size
|
||||
S3ExistenceCheckError: If S3 existence check fails after retries
|
||||
S3UploadError: If S3 upload fails
|
||||
"""
|
||||
# First pass: compute all hashes by streaming through file
|
||||
try:
|
||||
sha256_hasher = hashlib.sha256()
|
||||
md5_hasher = hashlib.md5()
|
||||
sha1_hasher = hashlib.sha1()
|
||||
@@ -109,15 +383,49 @@ class S3Storage:
|
||||
sha1_hasher.update(chunk)
|
||||
size += len(chunk)
|
||||
|
||||
# Enforce file size limit during streaming (protection against spoofing)
|
||||
if size > settings.max_file_size:
|
||||
raise FileSizeExceededError(
|
||||
f"File size exceeds maximum {settings.max_file_size}"
|
||||
)
|
||||
|
||||
if size == 0:
|
||||
raise HashComputationError("Empty file content")
|
||||
|
||||
sha256_hash = sha256_hasher.hexdigest()
|
||||
md5_hash = md5_hasher.hexdigest()
|
||||
sha1_hash = sha1_hasher.hexdigest()
|
||||
except (HashComputationError, FileSizeExceededError):
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Hash computation failed for multipart upload: {e}")
|
||||
raise HashComputationError(f"Failed to compute hash: {e}") from e
|
||||
|
||||
s3_key = f"fruits/{sha256_hash[:2]}/{sha256_hash[2:4]}/{sha256_hash}"
|
||||
|
||||
# Check if already exists (deduplication)
|
||||
if self._exists(s3_key):
|
||||
# Check if already exists (deduplication) with retry logic
|
||||
try:
|
||||
exists = self._exists(s3_key)
|
||||
except S3ExistenceCheckError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error during S3 existence check: {e}")
|
||||
raise S3ExistenceCheckError(f"Failed to check S3 existence: {e}") from e
|
||||
|
||||
if exists:
|
||||
obj_info = self.get_object_info(s3_key)
|
||||
s3_etag = obj_info.get("etag", "").strip('"') if obj_info else None
|
||||
# Check for hash collision by comparing size
|
||||
if obj_info:
|
||||
existing_size = obj_info.get("size", 0)
|
||||
if existing_size != size:
|
||||
logger.critical(
|
||||
f"HASH COLLISION DETECTED! Hash {sha256_hash} has size mismatch: "
|
||||
f"existing={existing_size}, new={size}. This is extremely rare."
|
||||
)
|
||||
raise HashCollisionError(
|
||||
f"Hash collision detected for {sha256_hash}: size mismatch"
|
||||
)
|
||||
return StorageResult(
|
||||
sha256=sha256_hash,
|
||||
size=size,
|
||||
@@ -131,7 +439,11 @@ class S3Storage:
|
||||
file.seek(0)
|
||||
|
||||
# Start multipart upload
|
||||
try:
|
||||
mpu = self.client.create_multipart_upload(Bucket=self.bucket, Key=s3_key)
|
||||
except (EndpointConnectionError, BotoConnectionError) as e:
|
||||
logger.error(f"S3 storage unavailable for multipart upload: {e}")
|
||||
raise S3StorageUnavailableError(f"Storage backend unavailable: {e}") from e
|
||||
upload_id = mpu["UploadId"]
|
||||
|
||||
try:
|
||||
@@ -150,10 +462,12 @@ class S3Storage:
|
||||
PartNumber=part_number,
|
||||
Body=chunk,
|
||||
)
|
||||
parts.append({
|
||||
parts.append(
|
||||
{
|
||||
"PartNumber": part_number,
|
||||
"ETag": response["ETag"],
|
||||
})
|
||||
}
|
||||
)
|
||||
part_number += 1
|
||||
|
||||
# Complete multipart upload
|
||||
@@ -226,7 +540,9 @@ class S3Storage:
|
||||
# Upload based on size
|
||||
if size < MULTIPART_THRESHOLD:
|
||||
content = b"".join(all_chunks)
|
||||
response = self.client.put_object(Bucket=self.bucket, Key=s3_key, Body=content)
|
||||
response = self.client.put_object(
|
||||
Bucket=self.bucket, Key=s3_key, Body=content
|
||||
)
|
||||
s3_etag = response.get("ETag", "").strip('"')
|
||||
else:
|
||||
# Use multipart for large files
|
||||
@@ -251,10 +567,12 @@ class S3Storage:
|
||||
PartNumber=part_number,
|
||||
Body=part_data,
|
||||
)
|
||||
parts.append({
|
||||
parts.append(
|
||||
{
|
||||
"PartNumber": part_number,
|
||||
"ETag": response["ETag"],
|
||||
})
|
||||
}
|
||||
)
|
||||
part_number += 1
|
||||
|
||||
# Upload remaining buffer
|
||||
@@ -266,10 +584,12 @@ class S3Storage:
|
||||
PartNumber=part_number,
|
||||
Body=buffer,
|
||||
)
|
||||
parts.append({
|
||||
parts.append(
|
||||
{
|
||||
"PartNumber": part_number,
|
||||
"ETag": response["ETag"],
|
||||
})
|
||||
}
|
||||
)
|
||||
|
||||
complete_response = self.client.complete_multipart_upload(
|
||||
Bucket=self.bucket,
|
||||
@@ -326,7 +646,9 @@ class S3Storage:
|
||||
self._active_uploads[upload_id] = session
|
||||
return session
|
||||
|
||||
def upload_part(self, upload_id: str, part_number: int, data: bytes) -> Dict[str, Any]:
|
||||
def upload_part(
|
||||
self, upload_id: str, part_number: int, data: bytes
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Upload a part for a resumable upload.
|
||||
Returns part info including ETag.
|
||||
@@ -434,14 +756,51 @@ class S3Storage:
|
||||
except ClientError:
|
||||
return None
|
||||
|
||||
def _exists(self, s3_key: str) -> bool:
|
||||
"""Check if an object exists"""
|
||||
def _exists(self, s3_key: str, retry: bool = True) -> bool:
|
||||
"""
|
||||
Check if an object exists with optional retry logic.
|
||||
|
||||
Args:
|
||||
s3_key: The S3 key to check
|
||||
retry: Whether to retry on transient failures (default: True)
|
||||
|
||||
Returns:
|
||||
True if object exists, False otherwise
|
||||
|
||||
Raises:
|
||||
S3ExistenceCheckError: If all retries fail due to non-404 errors
|
||||
"""
|
||||
import time
|
||||
|
||||
max_retries = MAX_EXISTENCE_CHECK_RETRIES if retry else 1
|
||||
last_error = None
|
||||
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
self.client.head_object(Bucket=self.bucket, Key=s3_key)
|
||||
return True
|
||||
except ClientError:
|
||||
except ClientError as e:
|
||||
error_code = e.response.get("Error", {}).get("Code", "")
|
||||
# 404 means object doesn't exist - not an error
|
||||
if error_code in ("404", "NoSuchKey"):
|
||||
return False
|
||||
|
||||
# For other errors, retry
|
||||
last_error = e
|
||||
if attempt < max_retries - 1:
|
||||
logger.warning(
|
||||
f"S3 existence check failed (attempt {attempt + 1}/{max_retries}): {e}"
|
||||
)
|
||||
time.sleep(0.1 * (attempt + 1))  # Linearly increasing backoff between retries
|
||||
|
||||
# All retries failed
|
||||
logger.error(
|
||||
f"S3 existence check failed after {max_retries} attempts: {last_error}"
|
||||
)
|
||||
raise S3ExistenceCheckError(
|
||||
f"Failed to check S3 object existence after {max_retries} attempts: {last_error}"
|
||||
)
|
||||
|
||||
def delete(self, s3_key: str) -> bool:
|
||||
"""Delete an object"""
|
||||
try:
|
||||
@@ -490,12 +849,68 @@ class S3Storage:
|
||||
)
|
||||
return url
|
||||
|
||||
def health_check(self) -> bool:
|
||||
"""
|
||||
Check if the storage backend is healthy and accessible.
|
||||
|
||||
Performs a lightweight HEAD request on the bucket to verify connectivity.
|
||||
|
||||
Returns:
|
||||
True if healthy, False otherwise
|
||||
"""
|
||||
try:
|
||||
self.client.head_bucket(Bucket=self.bucket)
|
||||
return True
|
||||
except ClientError as e:
|
||||
logger.warning(f"Storage health check failed: {e}")
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error during storage health check: {e}")
|
||||
return False
|
||||
|
||||
def verify_integrity(self, s3_key: str, expected_sha256: str) -> bool:
|
||||
"""
|
||||
Verify the integrity of a stored object by downloading and re-hashing.
|
||||
|
||||
This is an expensive operation and should only be used for critical
|
||||
verification scenarios.
|
||||
|
||||
Args:
|
||||
s3_key: The storage key of the file
|
||||
expected_sha256: The expected SHA256 hash
|
||||
|
||||
Returns:
|
||||
True if hash matches, False otherwise
|
||||
"""
|
||||
try:
|
||||
content = self.get(s3_key)
|
||||
actual_hash = hashlib.sha256(content).hexdigest()
|
||||
if actual_hash != expected_sha256:
|
||||
logger.error(
|
||||
f"Integrity verification failed for {s3_key}: "
|
||||
f"expected {expected_sha256[:12]}..., got {actual_hash[:12]}..."
|
||||
)
|
||||
return False
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Error during integrity verification for {s3_key}: {e}")
|
||||
return False
|
||||
|
||||
|
||||
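The changelog entry for #33 says `health_check()` is wired into the `/health` endpoint. That route is not part of this excerpt, so the following is only a sketch of how it could combine `get_storage()` with the `HealthResponse` schema defined earlier (module paths are assumptions based on the relative imports used in this file):

```python
# Hedged sketch: the real /health route is not shown in this diff.
from fastapi import APIRouter

from .schemas import HealthResponse  # assumption: schema module name
from .storage import get_storage

router = APIRouter()


@router.get("/health", response_model=HealthResponse)
def health() -> HealthResponse:
    storage_ok = get_storage().health_check()  # lightweight HEAD against the bucket
    return HealthResponse(
        status="ok" if storage_ok else "degraded",
        storage_healthy=storage_ok,
    )
```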
# Singleton instance
|
||||
_storage = None
|
||||
_storage: Optional[S3Storage] = None
|
||||
|
||||
|
||||
def get_storage() -> S3Storage:
|
||||
def get_storage() -> StorageBackend:
|
||||
"""
|
||||
Get the configured storage backend instance.
|
||||
|
||||
Currently returns S3Storage (works with S3-compatible backends like MinIO).
|
||||
Future implementations may support backend selection via configuration.
|
||||
|
||||
Returns:
|
||||
StorageBackend instance
|
||||
"""
|
||||
global _storage
|
||||
if _storage is None:
|
||||
_storage = S3Storage()
|
||||
|
||||
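Every storage path in this module is derived from the SHA256 hash alone, using the `fruits/{hash[:2]}/{hash[2:4]}/{hash}` layout that also appears in the tests below. A tiny standalone sketch of that derivation, checked against the pre-computed `Hello, World!` hash from the test fixtures:

```python
# Standalone illustration of the content-addressable key layout used above.
import hashlib


def s3_key_for(content: bytes) -> str:
    sha256 = hashlib.sha256(content).hexdigest()
    # Two levels of 2-character prefixes keep any single "directory" small.
    return f"fruits/{sha256[:2]}/{sha256[2:4]}/{sha256}"


key = s3_key_for(b"Hello, World!")
assert key == (
    "fruits/df/fd/"
    "dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f"
)
```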
backend/pytest.ini (new file, 29 lines)
@@ -0,0 +1,29 @@
[pytest]
testpaths = tests
python_files = test_*.py
python_functions = test_*
python_classes = Test*
asyncio_mode = auto
addopts = -v --tb=short --cov=app --cov-report=term-missing --cov-report=html:coverage_html --cov-fail-under=0
filterwarnings =
    ignore::DeprecationWarning
    ignore::UserWarning
markers =
    unit: Unit tests (no external dependencies)
    integration: Integration tests (require database/storage)
    slow: Slow tests (skip with -m "not slow")

# Coverage configuration
[coverage:run]
source = app
omit =
    */tests/*
    */__pycache__/*

[coverage:report]
exclude_lines =
    pragma: no cover
    def __repr__
    raise NotImplementedError
    if __name__ == .__main__.:
    pass
@@ -9,3 +9,10 @@ pydantic==2.5.3
pydantic-settings==2.1.0
python-jose[cryptography]==3.3.0
passlib[bcrypt]==1.7.4

# Test dependencies
pytest>=7.4.0
pytest-asyncio>=0.21.0
pytest-cov>=4.1.0
httpx>=0.25.0
moto[s3]>=4.2.0
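`moto[s3]` is pulled in for S3 mocking, although the conftest below also ships a hand-rolled `MockS3Client`. A hedged sketch of the moto route, in case it is preferred for some tests; note the decorator is `mock_s3` in moto 4.x but was folded into `mock_aws` in moto 5.x, so the import guards both:

```python
# Sketch only: moto-based S3 mocking as an alternative to MockS3Client.
import boto3

try:  # moto >= 5 replaced the per-service decorators with mock_aws
    from moto import mock_aws as mock_s3
except ImportError:
    from moto import mock_s3


@mock_s3
def test_put_and_head_object():
    client = boto3.client("s3", region_name="us-east-1")
    client.create_bucket(Bucket="test-bucket")
    client.put_object(Bucket="test-bucket", Key="fruits/ab/cd/abcd", Body=b"data")
    head = client.head_object(Bucket="test-bucket", Key="fruits/ab/cd/abcd")
    assert head["ContentLength"] == 4
```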
backend/tests/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# Test package
backend/tests/conftest.py (new file, 414 lines)
@@ -0,0 +1,414 @@
|
||||
"""
|
||||
Test configuration and fixtures for Orchard backend tests.
|
||||
|
||||
This module provides:
|
||||
- Database fixtures with test isolation
|
||||
- Mock S3 storage using moto
|
||||
- Test data factories for common scenarios
|
||||
"""
|
||||
|
||||
import os
|
||||
import pytest
|
||||
import hashlib
|
||||
from typing import Generator, BinaryIO
|
||||
from unittest.mock import MagicMock, patch
|
||||
import io
|
||||
|
||||
# Set test environment defaults before importing app modules
|
||||
# Use setdefault to NOT override existing env vars (from docker-compose)
|
||||
os.environ.setdefault("ORCHARD_DATABASE_HOST", "localhost")
|
||||
os.environ.setdefault("ORCHARD_DATABASE_PORT", "5432")
|
||||
os.environ.setdefault("ORCHARD_DATABASE_USER", "test")
|
||||
os.environ.setdefault("ORCHARD_DATABASE_PASSWORD", "test")
|
||||
os.environ.setdefault("ORCHARD_DATABASE_DBNAME", "orchard_test")
|
||||
os.environ.setdefault("ORCHARD_S3_ENDPOINT", "http://localhost:9000")
|
||||
os.environ.setdefault("ORCHARD_S3_BUCKET", "test-bucket")
|
||||
os.environ.setdefault("ORCHARD_S3_ACCESS_KEY_ID", "test")
|
||||
os.environ.setdefault("ORCHARD_S3_SECRET_ACCESS_KEY", "test")
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Test Data Factories
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def create_test_file(content: bytes = None, size: int = 1024) -> io.BytesIO:
|
||||
"""
|
||||
Create a test file with known content.
|
||||
|
||||
Args:
|
||||
content: Specific content to use, or None to generate random-ish content
|
||||
size: Size of generated content if content is None
|
||||
|
||||
Returns:
|
||||
BytesIO object with the content
|
||||
"""
|
||||
if content is None:
|
||||
content = os.urandom(size)
|
||||
return io.BytesIO(content)
|
||||
|
||||
|
||||
def compute_sha256(content: bytes) -> str:
|
||||
"""Compute SHA256 hash of content as lowercase hex string."""
|
||||
return hashlib.sha256(content).hexdigest()
|
||||
|
||||
|
||||
def compute_md5(content: bytes) -> str:
|
||||
"""Compute MD5 hash of content as lowercase hex string."""
|
||||
return hashlib.md5(content).hexdigest()
|
||||
|
||||
|
||||
def compute_sha1(content: bytes) -> str:
|
||||
"""Compute SHA1 hash of content as lowercase hex string."""
|
||||
return hashlib.sha1(content).hexdigest()
|
||||
|
||||
|
||||
# Known test data with pre-computed hashes
|
||||
TEST_CONTENT_HELLO = b"Hello, World!"
|
||||
TEST_HASH_HELLO = "dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f"
|
||||
TEST_MD5_HELLO = "65a8e27d8879283831b664bd8b7f0ad4"
|
||||
TEST_SHA1_HELLO = "0a0a9f2a6772942557ab5355d76af442f8f65e01"
|
||||
|
||||
TEST_CONTENT_EMPTY = b""
|
||||
# Note: Empty content should be rejected by the storage layer
|
||||
|
||||
TEST_CONTENT_BINARY = bytes(range(256))
|
||||
TEST_HASH_BINARY = compute_sha256(TEST_CONTENT_BINARY)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Mock Storage Fixtures
|
||||
# =============================================================================
|
||||
|
||||
|
||||
class MockS3Client:
|
||||
"""Mock S3 client for unit testing without actual S3/MinIO."""
|
||||
|
||||
def __init__(self):
|
||||
self.objects = {} # key -> content
|
||||
self.bucket = "test-bucket"
|
||||
|
||||
def put_object(self, Bucket: str, Key: str, Body: bytes) -> dict:
|
||||
self.objects[Key] = Body
|
||||
return {"ETag": f'"{compute_md5(Body)}"'}
|
||||
|
||||
def get_object(self, Bucket: str, Key: str, **kwargs) -> dict:
|
||||
if Key not in self.objects:
|
||||
raise Exception("NoSuchKey")
|
||||
content = self.objects[Key]
|
||||
return {
|
||||
"Body": io.BytesIO(content),
|
||||
"ContentLength": len(content),
|
||||
}
|
||||
|
||||
def head_object(self, Bucket: str, Key: str) -> dict:
|
||||
if Key not in self.objects:
|
||||
from botocore.exceptions import ClientError
|
||||
|
||||
error_response = {"Error": {"Code": "404", "Message": "Not Found"}}
|
||||
raise ClientError(error_response, "HeadObject")
|
||||
content = self.objects[Key]
|
||||
return {
|
||||
"ContentLength": len(content),
|
||||
"ETag": f'"{compute_md5(content)}"',
|
||||
}
|
||||
|
||||
def delete_object(self, Bucket: str, Key: str) -> dict:
|
||||
if Key in self.objects:
|
||||
del self.objects[Key]
|
||||
return {}
|
||||
|
||||
def head_bucket(self, Bucket: str) -> dict:
|
||||
return {}
|
||||
|
||||
def create_multipart_upload(self, Bucket: str, Key: str) -> dict:
|
||||
return {"UploadId": "test-upload-id"}
|
||||
|
||||
def upload_part(
|
||||
self, Bucket: str, Key: str, UploadId: str, PartNumber: int, Body: bytes
|
||||
) -> dict:
|
||||
return {"ETag": f'"{compute_md5(Body)}"'}
|
||||
|
||||
def complete_multipart_upload(
|
||||
self, Bucket: str, Key: str, UploadId: str, MultipartUpload: dict
|
||||
) -> dict:
|
||||
return {"ETag": '"test-etag"'}
|
||||
|
||||
def abort_multipart_upload(self, Bucket: str, Key: str, UploadId: str) -> dict:
|
||||
return {}
|
||||
|
||||
def generate_presigned_url(
|
||||
self, ClientMethod: str, Params: dict, ExpiresIn: int
|
||||
) -> str:
|
||||
return f"https://test-bucket.s3.amazonaws.com/{Params['Key']}?presigned=true"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_s3_client() -> MockS3Client:
|
||||
"""Provide a mock S3 client for unit tests."""
|
||||
return MockS3Client()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_storage(mock_s3_client):
|
||||
"""
|
||||
Provide a mock storage instance for unit tests.
|
||||
|
||||
Uses the MockS3Client to avoid actual S3/MinIO calls.
|
||||
"""
|
||||
from app.storage import S3Storage
|
||||
|
||||
storage = S3Storage.__new__(S3Storage)
|
||||
storage.client = mock_s3_client
|
||||
storage.bucket = "test-bucket"
|
||||
storage._active_uploads = {}
|
||||
|
||||
return storage
|
||||
|
||||
|
||||
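A short example of how the `mock_storage` fixture is meant to be used in unit tests; this particular test is illustrative only (the real ones follow in `test_duplicate_detection.py`):

```python
# Illustrative unit test built on the fixtures above.
import io

import pytest

from tests.conftest import compute_sha256


@pytest.mark.unit
def test_store_returns_expected_sha256(mock_storage):
    content = b"Hello, World!"
    result = mock_storage._store_simple(io.BytesIO(content))

    assert result.sha256 == compute_sha256(content)
    assert result.size == len(content)
    assert result.s3_key.endswith(result.sha256)
```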
# =============================================================================
|
||||
# Database Fixtures (for integration tests)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def test_db_url():
|
||||
"""Get the test database URL."""
|
||||
return (
|
||||
f"postgresql://{os.environ['ORCHARD_DATABASE_USER']}:"
|
||||
f"{os.environ['ORCHARD_DATABASE_PASSWORD']}@"
|
||||
f"{os.environ['ORCHARD_DATABASE_HOST']}:"
|
||||
f"{os.environ['ORCHARD_DATABASE_PORT']}/"
|
||||
f"{os.environ['ORCHARD_DATABASE_DBNAME']}"
|
||||
)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# HTTP Client Fixtures (for API tests)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def test_app():
|
||||
"""
|
||||
Create a test FastAPI application.
|
||||
|
||||
Note: This requires the database to be available for integration tests.
|
||||
For unit tests, use mock_storage fixture instead.
|
||||
"""
|
||||
from fastapi.testclient import TestClient
|
||||
from app.main import app
|
||||
|
||||
return TestClient(app)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Integration Test Fixtures
|
||||
# =============================================================================
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def integration_client():
|
||||
"""
|
||||
Create a test client for integration tests.
|
||||
|
||||
Uses the real database and MinIO from docker-compose.local.yml.
|
||||
"""
|
||||
from httpx import Client
|
||||
|
||||
# Connect to the running orchard-server container
|
||||
base_url = os.environ.get("ORCHARD_TEST_URL", "http://localhost:8080")
|
||||
|
||||
with Client(base_url=base_url, timeout=30.0) as client:
|
||||
yield client
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def unique_test_id():
|
||||
"""Generate a unique ID for test isolation."""
|
||||
import uuid
|
||||
|
||||
return f"test-{uuid.uuid4().hex[:8]}"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def test_project(integration_client, unique_test_id):
|
||||
"""
|
||||
Create a test project and clean it up after the test.
|
||||
|
||||
Yields the project name.
|
||||
"""
|
||||
project_name = f"test-project-{unique_test_id}"
|
||||
|
||||
# Create project
|
||||
response = integration_client.post(
|
||||
"/api/v1/projects",
|
||||
json={"name": project_name, "description": "Test project", "is_public": True},
|
||||
)
|
||||
assert response.status_code == 200, f"Failed to create project: {response.text}"
|
||||
|
||||
yield project_name
|
||||
|
||||
# Cleanup: delete project
|
||||
try:
|
||||
integration_client.delete(f"/api/v1/projects/{project_name}")
|
||||
except Exception:
|
||||
pass # Ignore cleanup errors
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def test_package(integration_client, test_project, unique_test_id):
|
||||
"""
|
||||
Create a test package within a test project.
|
||||
|
||||
Yields (project_name, package_name) tuple.
|
||||
"""
|
||||
package_name = f"test-package-{unique_test_id}"
|
||||
|
||||
# Create package
|
||||
response = integration_client.post(
|
||||
f"/api/v1/project/{test_project}/packages",
|
||||
json={"name": package_name, "description": "Test package"},
|
||||
)
|
||||
assert response.status_code == 200, f"Failed to create package: {response.text}"
|
||||
|
||||
yield (test_project, package_name)
|
||||
|
||||
# Cleanup handled by test_project fixture (cascade delete)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def test_content():
|
||||
"""
|
||||
Generate unique test content for each test.
|
||||
|
||||
Returns (content_bytes, expected_sha256) tuple.
|
||||
"""
|
||||
import uuid
|
||||
|
||||
content = f"test-content-{uuid.uuid4().hex}".encode()
|
||||
sha256 = compute_sha256(content)
|
||||
return (content, sha256)
|
||||
|
||||
|
||||
def upload_test_file(
|
||||
client,
|
||||
project: str,
|
||||
package: str,
|
||||
content: bytes,
|
||||
filename: str = "test.bin",
|
||||
tag: str = None,
|
||||
) -> dict:
|
||||
"""
|
||||
Helper function to upload a test file.
|
||||
|
||||
Returns the upload response as a dict.
|
||||
"""
|
||||
files = {"file": (filename, io.BytesIO(content), "application/octet-stream")}
|
||||
data = {}
|
||||
if tag:
|
||||
data["tag"] = tag
|
||||
|
||||
response = client.post(
|
||||
f"/api/v1/project/{project}/{package}/upload",
|
||||
files=files,
|
||||
data=data if data else None,
|
||||
)
|
||||
assert response.status_code == 200, f"Upload failed: {response.text}"
|
||||
return response.json()
|
||||
|
||||
|
||||
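The helper above is the building block for the deduplication integration tests; a sketch of one such test, reading only the `deduplicated` and `ref_count` fields documented on `UploadResponse`:

```python
# Sketch of a deduplication integration test using the helpers above.
import pytest

from tests.conftest import upload_test_file


@pytest.mark.integration
def test_duplicate_upload_is_deduplicated(integration_client, test_package, test_content):
    project, package = test_package
    content, sha256 = test_content

    first = upload_test_file(integration_client, project, package, content, tag="v1")
    second = upload_test_file(integration_client, project, package, content, tag="v2")

    # Same content -> same artifact; the second upload is a dedup hit and
    # only bumps the reference count.
    assert second["deduplicated"] is True
    assert second["ref_count"] > first["ref_count"]
```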
# =============================================================================
|
||||
# S3 Direct Access Helpers (for integration tests)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def get_s3_client():
|
||||
"""
|
||||
Create a boto3 S3 client for direct S3 access in integration tests.
|
||||
|
||||
Uses environment variables for configuration (same as the app).
|
||||
Note: When running in container, S3 endpoint should be 'minio:9000' not 'localhost:9000'.
|
||||
"""
|
||||
import boto3
|
||||
from botocore.config import Config
|
||||
|
||||
config = Config(s3={"addressing_style": "path"})
|
||||
|
||||
# Use the same endpoint as the app (minio:9000 in container, localhost:9000 locally)
|
||||
endpoint = os.environ.get("ORCHARD_S3_ENDPOINT", "http://minio:9000")
|
||||
|
||||
return boto3.client(
|
||||
"s3",
|
||||
endpoint_url=endpoint,
|
||||
region_name=os.environ.get("ORCHARD_S3_REGION", "us-east-1"),
|
||||
aws_access_key_id=os.environ.get("ORCHARD_S3_ACCESS_KEY_ID", "minioadmin"),
|
||||
aws_secret_access_key=os.environ.get(
|
||||
"ORCHARD_S3_SECRET_ACCESS_KEY", "minioadmin"
|
||||
),
|
||||
config=config,
|
||||
)
|
||||
|
||||
|
||||
def get_s3_bucket():
|
||||
"""Get the S3 bucket name from environment."""
|
||||
return os.environ.get("ORCHARD_S3_BUCKET", "orchard-artifacts")
|
||||
|
||||
|
||||
def list_s3_objects_by_hash(sha256_hash: str) -> list:
|
||||
"""
|
||||
List S3 objects that match a specific SHA256 hash.
|
||||
|
||||
Uses the fruits/{hash[:2]}/{hash[2:4]}/{hash} key pattern.
|
||||
Returns list of matching object keys.
|
||||
"""
|
||||
client = get_s3_client()
|
||||
bucket = get_s3_bucket()
|
||||
prefix = f"fruits/{sha256_hash[:2]}/{sha256_hash[2:4]}/{sha256_hash}"
|
||||
|
||||
response = client.list_objects_v2(Bucket=bucket, Prefix=prefix)
|
||||
|
||||
if "Contents" not in response:
|
||||
return []
|
||||
|
||||
return [obj["Key"] for obj in response["Contents"]]
|
||||
|
||||
|
||||
def count_s3_objects_by_prefix(prefix: str) -> int:
|
||||
"""
|
||||
Count S3 objects with a given prefix.
|
||||
|
||||
Useful for checking if duplicate uploads created multiple objects.
|
||||
"""
|
||||
client = get_s3_client()
|
||||
bucket = get_s3_bucket()
|
||||
|
||||
response = client.list_objects_v2(Bucket=bucket, Prefix=prefix)
|
||||
|
||||
if "Contents" not in response:
|
||||
return 0
|
||||
|
||||
return len(response["Contents"])
|
||||
|
||||
|
||||
def s3_object_exists(sha256_hash: str) -> bool:
|
||||
"""
|
||||
Check if an S3 object exists for a given SHA256 hash.
|
||||
"""
|
||||
objects = list_s3_objects_by_hash(sha256_hash)
|
||||
return len(objects) > 0
|
||||
|
||||
|
||||
def delete_s3_object_by_hash(sha256_hash: str) -> bool:
|
||||
"""
|
||||
Delete an S3 object by its SHA256 hash (for test cleanup).
|
||||
"""
|
||||
client = get_s3_client()
|
||||
bucket = get_s3_bucket()
|
||||
s3_key = f"fruits/{sha256_hash[:2]}/{sha256_hash[2:4]}/{sha256_hash}"
|
||||
|
||||
try:
|
||||
client.delete_object(Bucket=bucket, Key=s3_key)
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
backend/tests/test_duplicate_detection.py (new file, 207 lines)
@@ -0,0 +1,207 @@
|
||||
"""
|
||||
Unit tests for duplicate detection and deduplication logic.
|
||||
|
||||
Tests cover:
|
||||
- _exists() method correctly identifies existing S3 keys
|
||||
- S3 key generation follows expected pattern
|
||||
- Storage layer skips upload when artifact already exists
|
||||
- Storage layer performs upload when artifact does not exist
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import io
|
||||
from unittest.mock import MagicMock, patch
|
||||
from tests.conftest import (
|
||||
compute_sha256,
|
||||
TEST_CONTENT_HELLO,
|
||||
TEST_HASH_HELLO,
|
||||
)
|
||||
|
||||
|
||||
class TestExistsMethod:
|
||||
"""Tests for the _exists() method that checks S3 object existence."""
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_exists_returns_true_for_existing_key(self, mock_storage, mock_s3_client):
|
||||
"""Test _exists() returns True when object exists."""
|
||||
# Pre-populate the mock storage
|
||||
test_key = "fruits/df/fd/test-hash"
|
||||
mock_s3_client.objects[test_key] = b"content"
|
||||
|
||||
result = mock_storage._exists(test_key)
|
||||
|
||||
assert result is True
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_exists_returns_false_for_nonexistent_key(self, mock_storage):
|
||||
"""Test _exists() returns False when object doesn't exist."""
|
||||
result = mock_storage._exists("fruits/no/ne/nonexistent-key")
|
||||
|
||||
assert result is False
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_exists_handles_404_error(self, mock_storage):
|
||||
"""Test _exists() handles 404 errors gracefully."""
|
||||
# The mock client raises ClientError for nonexistent keys
|
||||
result = mock_storage._exists("fruits/xx/yy/does-not-exist")
|
||||
|
||||
assert result is False
|
||||
|
||||
|
||||
class TestS3KeyGeneration:
|
||||
"""Tests for S3 key pattern generation."""
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_s3_key_pattern(self):
|
||||
"""Test S3 key follows pattern: fruits/{hash[:2]}/{hash[2:4]}/{hash}"""
|
||||
test_hash = "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"
|
||||
|
||||
expected_key = f"fruits/{test_hash[:2]}/{test_hash[2:4]}/{test_hash}"
|
||||
# Expected: fruits/ab/cd/abcdef1234567890...
|
||||
|
||||
assert expected_key == f"fruits/ab/cd/{test_hash}"
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_s3_key_generation_in_storage(self, mock_storage):
|
||||
"""Test storage layer generates correct S3 key."""
|
||||
content = TEST_CONTENT_HELLO
|
||||
file_obj = io.BytesIO(content)
|
||||
|
||||
result = mock_storage._store_simple(file_obj)
|
||||
|
||||
expected_key = (
|
||||
f"fruits/{TEST_HASH_HELLO[:2]}/{TEST_HASH_HELLO[2:4]}/{TEST_HASH_HELLO}"
|
||||
)
|
||||
assert result.s3_key == expected_key
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_s3_key_uses_sha256_hash(self, mock_storage):
|
||||
"""Test S3 key is derived from SHA256 hash."""
|
||||
content = b"unique test content for key test"
|
||||
file_obj = io.BytesIO(content)
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
result = mock_storage._store_simple(file_obj)
|
||||
|
||||
# Key should contain the hash
|
||||
assert expected_hash in result.s3_key
|
||||
|
||||
|
||||
class TestDeduplicationBehavior:
|
||||
"""Tests for deduplication (skip upload when exists)."""
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_skips_upload_when_exists(self, mock_storage, mock_s3_client):
|
||||
"""Test storage skips S3 upload when artifact already exists."""
|
||||
content = TEST_CONTENT_HELLO
|
||||
s3_key = (
|
||||
f"fruits/{TEST_HASH_HELLO[:2]}/{TEST_HASH_HELLO[2:4]}/{TEST_HASH_HELLO}"
|
||||
)
|
||||
|
||||
# Pre-populate storage (simulate existing artifact)
|
||||
mock_s3_client.objects[s3_key] = content
|
||||
|
||||
# Track put_object calls
|
||||
original_put = mock_s3_client.put_object
|
||||
put_called = []
|
||||
|
||||
def tracked_put(*args, **kwargs):
|
||||
put_called.append(True)
|
||||
return original_put(*args, **kwargs)
|
||||
|
||||
mock_s3_client.put_object = tracked_put
|
||||
|
||||
# Store the same content
|
||||
file_obj = io.BytesIO(content)
|
||||
result = mock_storage._store_simple(file_obj)
|
||||
|
||||
# put_object should NOT have been called (deduplication)
|
||||
assert len(put_called) == 0
|
||||
assert result.sha256 == TEST_HASH_HELLO
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_uploads_when_not_exists(self, mock_storage, mock_s3_client):
|
||||
"""Test storage uploads to S3 when artifact doesn't exist."""
|
||||
content = b"brand new unique content"
|
||||
content_hash = compute_sha256(content)
|
||||
s3_key = f"fruits/{content_hash[:2]}/{content_hash[2:4]}/{content_hash}"
|
||||
|
||||
# Ensure object doesn't exist
|
||||
assert s3_key not in mock_s3_client.objects
|
||||
|
||||
# Store the content
|
||||
file_obj = io.BytesIO(content)
|
||||
result = mock_storage._store_simple(file_obj)
|
||||
|
||||
# Object should now exist in mock storage
|
||||
assert s3_key in mock_s3_client.objects
|
||||
assert mock_s3_client.objects[s3_key] == content
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_returns_same_hash_for_duplicate(self, mock_storage, mock_s3_client):
|
||||
"""Test storing same content twice returns same hash."""
|
||||
content = b"content to be stored twice"
|
||||
|
||||
# First store
|
||||
file1 = io.BytesIO(content)
|
||||
result1 = mock_storage._store_simple(file1)
|
||||
|
||||
# Second store (duplicate)
|
||||
file2 = io.BytesIO(content)
|
||||
result2 = mock_storage._store_simple(file2)
|
||||
|
||||
assert result1.sha256 == result2.sha256
|
||||
assert result1.s3_key == result2.s3_key
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_different_content_different_keys(self, mock_storage):
|
||||
"""Test different content produces different S3 keys."""
|
||||
content1 = b"first content"
|
||||
content2 = b"second content"
|
||||
|
||||
file1 = io.BytesIO(content1)
|
||||
result1 = mock_storage._store_simple(file1)
|
||||
|
||||
file2 = io.BytesIO(content2)
|
||||
result2 = mock_storage._store_simple(file2)
|
||||
|
||||
assert result1.sha256 != result2.sha256
|
||||
assert result1.s3_key != result2.s3_key
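# Illustrative sketch (not part of this file) of the store-with-deduplication flow
# these tests imply: hash first, upload only when the key is absent. This is an
# assumption-level sketch, not the project's `_store_simple`; only the boto3-style
# calls (head_object/put_object) and ClientError are standard.
import hashlib as _hashlib

from botocore.exceptions import ClientError


def store_deduplicated(s3_client, bucket: str, data: bytes) -> str:
    sha256_hex = _hashlib.sha256(data).hexdigest()
    key = f"fruits/{sha256_hex[:2]}/{sha256_hex[2:4]}/{sha256_hex}"
    try:
        # head_object raises ClientError (404) when the key does not exist
        s3_client.head_object(Bucket=bucket, Key=key)
    except ClientError:
        # only upload when the object is not already stored (deduplication)
        s3_client.put_object(Bucket=bucket, Key=key, Body=data)
    return key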
|
||||
|
||||
|
||||
class TestDeduplicationEdgeCases:
|
||||
"""Edge case tests for deduplication."""
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_same_content_different_filenames(self, mock_storage):
|
||||
"""Test same content with different metadata is deduplicated."""
|
||||
content = b"identical content"
|
||||
|
||||
# Store with "filename1"
|
||||
file1 = io.BytesIO(content)
|
||||
result1 = mock_storage._store_simple(file1)
|
||||
|
||||
# Store with "filename2" (same content)
|
||||
file2 = io.BytesIO(content)
|
||||
result2 = mock_storage._store_simple(file2)
|
||||
|
||||
# Both should have same hash (content-addressable)
|
||||
assert result1.sha256 == result2.sha256
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_whitespace_only_difference(self, mock_storage):
|
||||
"""Test content differing only by whitespace produces different hashes."""
|
||||
content1 = b"test content"
|
||||
content2 = b"test content" # Extra space
|
||||
content3 = b"test content " # Trailing space
|
||||
|
||||
file1 = io.BytesIO(content1)
|
||||
file2 = io.BytesIO(content2)
|
||||
file3 = io.BytesIO(content3)
|
||||
|
||||
result1 = mock_storage._store_simple(file1)
|
||||
result2 = mock_storage._store_simple(file2)
|
||||
result3 = mock_storage._store_simple(file3)
|
||||
|
||||
# All should be different (content-addressable)
|
||||
assert len({result1.sha256, result2.sha256, result3.sha256}) == 3
|
||||
168 backend/tests/test_garbage_collection.py Normal file
@@ -0,0 +1,168 @@
|
||||
"""
|
||||
Integration tests for garbage collection functionality.
|
||||
|
||||
Tests cover:
|
||||
- Listing orphaned artifacts (ref_count=0)
|
||||
- Garbage collection in dry-run mode
|
||||
- Garbage collection actual deletion
|
||||
- Verifying artifacts with refs are not deleted
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from tests.conftest import (
|
||||
compute_sha256,
|
||||
upload_test_file,
|
||||
)
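# Typical invocation for this module's integration tests (assuming the
# "integration" marker is registered and the local stack from
# docker-compose.local.yml is running; adjust the path to your layout):
#
#     pytest -m integration backend/tests/test_garbage_collection.py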
|
||||
|
||||
|
||||
class TestOrphanedArtifactsEndpoint:
|
||||
"""Tests for GET /api/v1/admin/orphaned-artifacts endpoint."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_list_orphaned_artifacts_returns_list(self, integration_client):
|
||||
"""Test orphaned artifacts endpoint returns a list."""
|
||||
response = integration_client.get("/api/v1/admin/orphaned-artifacts")
|
||||
assert response.status_code == 200
|
||||
assert isinstance(response.json(), list)
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_orphaned_artifact_has_required_fields(self, integration_client):
|
||||
"""Test orphaned artifact response has required fields."""
|
||||
response = integration_client.get("/api/v1/admin/orphaned-artifacts?limit=1")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
if len(data) > 0:
|
||||
artifact = data[0]
|
||||
assert "id" in artifact
|
||||
assert "size" in artifact
|
||||
assert "created_at" in artifact
|
||||
assert "created_by" in artifact
|
||||
assert "original_name" in artifact
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_orphaned_artifacts_respects_limit(self, integration_client):
|
||||
"""Test orphaned artifacts endpoint respects limit parameter."""
|
||||
response = integration_client.get("/api/v1/admin/orphaned-artifacts?limit=5")
|
||||
assert response.status_code == 200
|
||||
assert len(response.json()) <= 5
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_artifact_becomes_orphaned_when_tag_deleted(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test artifact appears in orphaned list after tag is deleted."""
|
||||
project, package = test_package
|
||||
content = f"orphan test {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload with tag
|
||||
upload_test_file(integration_client, project, package, content, tag="temp-tag")
|
||||
|
||||
# Verify not in orphaned list (has ref_count=1)
|
||||
response = integration_client.get("/api/v1/admin/orphaned-artifacts?limit=1000")
|
||||
orphaned_ids = [a["id"] for a in response.json()]
|
||||
assert expected_hash not in orphaned_ids
|
||||
|
||||
# Delete the tag
|
||||
integration_client.delete(f"/api/v1/project/{project}/{package}/tags/temp-tag")
|
||||
|
||||
# Verify now in orphaned list (ref_count=0)
|
||||
response = integration_client.get("/api/v1/admin/orphaned-artifacts?limit=1000")
|
||||
orphaned_ids = [a["id"] for a in response.json()]
|
||||
assert expected_hash in orphaned_ids
|
||||
|
||||
|
||||
class TestGarbageCollectionEndpoint:
|
||||
"""Tests for POST /api/v1/admin/garbage-collect endpoint."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_garbage_collect_dry_run_returns_response(self, integration_client):
|
||||
"""Test garbage collection dry run returns valid response."""
|
||||
response = integration_client.post("/api/v1/admin/garbage-collect?dry_run=true")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert "artifacts_deleted" in data
|
||||
assert "bytes_freed" in data
|
||||
assert "artifact_ids" in data
|
||||
assert "dry_run" in data
|
||||
assert data["dry_run"] is True
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_garbage_collect_dry_run_doesnt_delete(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test garbage collection dry run doesn't actually delete artifacts."""
|
||||
project, package = test_package
|
||||
content = f"dry run test {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload and delete tag to create orphan
|
||||
upload_test_file(integration_client, project, package, content, tag="dry-run")
|
||||
integration_client.delete(f"/api/v1/project/{project}/{package}/tags/dry-run")
|
||||
|
||||
# Verify artifact exists
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.status_code == 200
|
||||
|
||||
# Run garbage collection in dry-run mode
|
||||
gc_response = integration_client.post(
|
||||
"/api/v1/admin/garbage-collect?dry_run=true&limit=1000"
|
||||
)
|
||||
assert gc_response.status_code == 200
|
||||
assert expected_hash in gc_response.json()["artifact_ids"]
|
||||
|
||||
# Verify artifact STILL exists (dry run didn't delete)
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.status_code == 200
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_garbage_collect_preserves_referenced_artifacts(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test garbage collection doesn't delete artifacts with ref_count > 0."""
|
||||
project, package = test_package
|
||||
content = f"preserve test {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload with tag (ref_count=1)
|
||||
upload_test_file(integration_client, project, package, content, tag="keep-this")
|
||||
|
||||
# Verify artifact exists with ref_count=1
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.status_code == 200
|
||||
assert response.json()["ref_count"] == 1
|
||||
|
||||
# Run garbage collection (dry_run to not affect other tests)
|
||||
gc_response = integration_client.post(
|
||||
"/api/v1/admin/garbage-collect?dry_run=true&limit=1000"
|
||||
)
|
||||
assert gc_response.status_code == 200
|
||||
|
||||
# Verify artifact was NOT in delete list (has ref_count > 0)
|
||||
assert expected_hash not in gc_response.json()["artifact_ids"]
|
||||
|
||||
# Verify artifact still exists
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.status_code == 200
|
||||
assert response.json()["ref_count"] == 1
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_garbage_collect_respects_limit(self, integration_client):
|
||||
"""Test garbage collection respects limit parameter."""
|
||||
response = integration_client.post(
|
||||
"/api/v1/admin/garbage-collect?dry_run=true&limit=5"
|
||||
)
|
||||
assert response.status_code == 200
|
||||
assert response.json()["artifacts_deleted"] <= 5
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_garbage_collect_returns_bytes_freed(self, integration_client):
|
||||
"""Test garbage collection returns accurate bytes_freed."""
|
||||
response = integration_client.post("/api/v1/admin/garbage-collect?dry_run=true")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert data["bytes_freed"] >= 0
|
||||
assert isinstance(data["bytes_freed"], int)
|
||||
215 backend/tests/test_hash_calculation.py Normal file
@@ -0,0 +1,215 @@
|
||||
"""
|
||||
Unit tests for SHA256 hash calculation and deduplication logic.
|
||||
|
||||
Tests cover:
|
||||
- Hash computation produces consistent results
|
||||
- Hash is always 64 character lowercase hexadecimal
|
||||
- Different content produces different hashes
|
||||
- Binary content handling
|
||||
- Large file handling (streaming)
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import hashlib
|
||||
import io
|
||||
from tests.conftest import (
|
||||
create_test_file,
|
||||
compute_sha256,
|
||||
TEST_CONTENT_HELLO,
|
||||
TEST_HASH_HELLO,
|
||||
TEST_CONTENT_BINARY,
|
||||
TEST_HASH_BINARY,
|
||||
)
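# For reference: compute_sha256 (imported above from tests.conftest) is assumed
# to be a thin wrapper around hashlib, roughly equivalent to:
#
#     def compute_sha256(data: bytes) -> str:
#         return hashlib.sha256(data).hexdigest()
#
# The tests below only rely on it matching hashlib.sha256 output.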
|
||||
|
||||
|
||||
class TestHashComputation:
|
||||
"""Unit tests for hash calculation functionality."""
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_sha256_consistent_results(self):
|
||||
"""Test SHA256 hash produces consistent results for identical content."""
|
||||
content = b"test content for hashing"
|
||||
|
||||
# Compute hash multiple times
|
||||
hash1 = compute_sha256(content)
|
||||
hash2 = compute_sha256(content)
|
||||
hash3 = compute_sha256(content)
|
||||
|
||||
assert hash1 == hash2 == hash3
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_sha256_different_content_different_hash(self):
|
||||
"""Test SHA256 produces different hashes for different content."""
|
||||
content1 = b"content version 1"
|
||||
content2 = b"content version 2"
|
||||
|
||||
hash1 = compute_sha256(content1)
|
||||
hash2 = compute_sha256(content2)
|
||||
|
||||
assert hash1 != hash2
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_sha256_format_64_char_hex(self):
|
||||
"""Test SHA256 hash is always 64 character lowercase hexadecimal."""
|
||||
test_cases = [
|
||||
b"", # Empty
|
||||
b"a", # Single char
|
||||
b"Hello, World!", # Normal string
|
||||
bytes(range(256)), # All byte values
|
||||
b"x" * 10000, # Larger content
|
||||
]
|
||||
|
||||
for content in test_cases:
|
||||
hash_value = compute_sha256(content)
|
||||
|
||||
# Check length
|
||||
assert len(hash_value) == 64, (
|
||||
f"Hash length should be 64, got {len(hash_value)}"
|
||||
)
|
||||
|
||||
# Check lowercase
|
||||
assert hash_value == hash_value.lower(), "Hash should be lowercase"
|
||||
|
||||
# Check hexadecimal
|
||||
assert all(c in "0123456789abcdef" for c in hash_value), (
|
||||
"Hash should be hex"
|
||||
)
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_sha256_known_value(self):
|
||||
"""Test SHA256 produces expected hash for known input."""
|
||||
assert compute_sha256(TEST_CONTENT_HELLO) == TEST_HASH_HELLO
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_sha256_binary_content(self):
|
||||
"""Test SHA256 handles binary content correctly."""
|
||||
assert compute_sha256(TEST_CONTENT_BINARY) == TEST_HASH_BINARY
|
||||
|
||||
# Test with null bytes
|
||||
content_with_nulls = b"\x00\x00test\x00\x00"
|
||||
hash_value = compute_sha256(content_with_nulls)
|
||||
assert len(hash_value) == 64
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_sha256_streaming_computation(self):
|
||||
"""Test SHA256 can be computed in chunks (streaming)."""
|
||||
# Large content
|
||||
chunk_size = 8192
|
||||
total_size = chunk_size * 10 # 80KB
|
||||
content = b"x" * total_size
|
||||
|
||||
# Direct computation
|
||||
direct_hash = compute_sha256(content)
|
||||
|
||||
# Streaming computation
|
||||
hasher = hashlib.sha256()
|
||||
for i in range(0, total_size, chunk_size):
|
||||
hasher.update(content[i : i + chunk_size])
|
||||
streaming_hash = hasher.hexdigest()
|
||||
|
||||
assert direct_hash == streaming_hash
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_sha256_order_matters(self):
|
||||
"""Test that content order affects hash (not just content set)."""
|
||||
content1 = b"AB"
|
||||
content2 = b"BA"
|
||||
|
||||
assert compute_sha256(content1) != compute_sha256(content2)
|
||||
|
||||
|
||||
class TestStorageHashComputation:
|
||||
"""Tests for hash computation in the storage layer."""
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_storage_computes_sha256(self, mock_storage):
|
||||
"""Test storage layer correctly computes SHA256 hash."""
|
||||
content = TEST_CONTENT_HELLO
|
||||
file_obj = io.BytesIO(content)
|
||||
|
||||
result = mock_storage._store_simple(file_obj)
|
||||
|
||||
assert result.sha256 == TEST_HASH_HELLO
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_storage_computes_md5(self, mock_storage):
|
||||
"""Test storage layer also computes MD5 hash."""
|
||||
content = TEST_CONTENT_HELLO
|
||||
file_obj = io.BytesIO(content)
|
||||
|
||||
result = mock_storage._store_simple(file_obj)
|
||||
|
||||
expected_md5 = hashlib.md5(content).hexdigest()
|
||||
assert result.md5 == expected_md5
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_storage_computes_sha1(self, mock_storage):
|
||||
"""Test storage layer also computes SHA1 hash."""
|
||||
content = TEST_CONTENT_HELLO
|
||||
file_obj = io.BytesIO(content)
|
||||
|
||||
result = mock_storage._store_simple(file_obj)
|
||||
|
||||
expected_sha1 = hashlib.sha1(content).hexdigest()
|
||||
assert result.sha1 == expected_sha1
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_storage_returns_correct_size(self, mock_storage):
|
||||
"""Test storage layer returns correct file size."""
|
||||
content = b"test content with known size"
|
||||
file_obj = io.BytesIO(content)
|
||||
|
||||
result = mock_storage._store_simple(file_obj)
|
||||
|
||||
assert result.size == len(content)
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_storage_generates_correct_s3_key(self, mock_storage):
|
||||
"""Test storage layer generates correct S3 key pattern."""
|
||||
content = TEST_CONTENT_HELLO
|
||||
file_obj = io.BytesIO(content)
|
||||
|
||||
result = mock_storage._store_simple(file_obj)
|
||||
|
||||
# Key should be: fruits/{hash[:2]}/{hash[2:4]}/{hash}
|
||||
expected_key = (
|
||||
f"fruits/{TEST_HASH_HELLO[:2]}/{TEST_HASH_HELLO[2:4]}/{TEST_HASH_HELLO}"
|
||||
)
|
||||
assert result.s3_key == expected_key
|
||||
|
||||
|
||||
class TestHashEdgeCases:
|
||||
"""Edge case tests for hash computation."""
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_hash_empty_content_rejected(self, mock_storage):
|
||||
"""Test that empty content is rejected."""
|
||||
from app.storage import HashComputationError
|
||||
|
||||
file_obj = io.BytesIO(b"")
|
||||
|
||||
with pytest.raises(HashComputationError):
|
||||
mock_storage._store_simple(file_obj)
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_hash_large_file_streaming(self, mock_storage):
|
||||
"""Test hash computation for large files uses streaming."""
|
||||
# Create a 10MB file
|
||||
size = 10 * 1024 * 1024
|
||||
content = b"x" * size
|
||||
file_obj = io.BytesIO(content)
|
||||
|
||||
result = mock_storage._store_simple(file_obj)
|
||||
|
||||
expected_hash = compute_sha256(content)
|
||||
assert result.sha256 == expected_hash
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_hash_special_bytes(self):
|
||||
"""Test hash handles all byte values correctly."""
|
||||
# All possible byte values
|
||||
content = bytes(range(256))
|
||||
hash_value = compute_sha256(content)
|
||||
|
||||
assert len(hash_value) == 64
|
||||
assert hash_value == TEST_HASH_BINARY
|
||||
604 backend/tests/test_integration_uploads.py Normal file
@@ -0,0 +1,604 @@
|
||||
"""
|
||||
Integration tests for duplicate uploads and storage verification.
|
||||
|
||||
These tests require the full stack to be running (docker-compose.local.yml).
|
||||
|
||||
Tests cover:
|
||||
- Duplicate upload scenarios across packages and projects
|
||||
- Storage verification (single S3 object, single artifact row)
|
||||
- Upload table tracking
|
||||
- Content integrity verification
|
||||
- Concurrent upload handling
|
||||
- Failure cleanup
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import io
|
||||
import threading
|
||||
import time
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from tests.conftest import (
|
||||
compute_sha256,
|
||||
upload_test_file,
|
||||
list_s3_objects_by_hash,
|
||||
s3_object_exists,
|
||||
delete_s3_object_by_hash,
|
||||
)
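# upload_test_file (imported above) is used throughout with the call shape below,
# inferred from its call sites in this module; the actual conftest helper may
# differ in details:
#
#     upload_test_file(client, project, package, content: bytes,
#                      filename: str = ..., tag: str | None = ...) -> dict
#     # returns the upload response JSON: artifact_id, ref_count, size,
#     # deduplicated, ...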
|
||||
|
||||
|
||||
class TestDuplicateUploadScenarios:
|
||||
"""Integration tests for duplicate upload behavior."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_same_file_twice_returns_same_artifact_id(
|
||||
self, integration_client, test_package
|
||||
):
|
||||
"""Test uploading same file twice returns same artifact_id."""
|
||||
project, package = test_package
|
||||
content = b"content uploaded twice for same artifact test"
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# First upload
|
||||
result1 = upload_test_file(
|
||||
integration_client, project, package, content, tag="first"
|
||||
)
|
||||
assert result1["artifact_id"] == expected_hash
|
||||
|
||||
# Second upload
|
||||
result2 = upload_test_file(
|
||||
integration_client, project, package, content, tag="second"
|
||||
)
|
||||
assert result2["artifact_id"] == expected_hash
|
||||
assert result1["artifact_id"] == result2["artifact_id"]
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_same_file_twice_increments_ref_count(
|
||||
self, integration_client, test_package
|
||||
):
|
||||
"""Test uploading same file twice increments ref_count to 2."""
|
||||
project, package = test_package
|
||||
content = b"content for ref count increment test"
|
||||
|
||||
# First upload
|
||||
result1 = upload_test_file(
|
||||
integration_client, project, package, content, tag="v1"
|
||||
)
|
||||
assert result1["ref_count"] == 1
|
||||
|
||||
# Second upload
|
||||
result2 = upload_test_file(
|
||||
integration_client, project, package, content, tag="v2"
|
||||
)
|
||||
assert result2["ref_count"] == 2
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_same_file_different_packages_shares_artifact(
|
||||
self, integration_client, test_project, unique_test_id
|
||||
):
|
||||
"""Test uploading same file to different packages shares artifact."""
|
||||
project = test_project
|
||||
content = f"content shared across packages {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Create two packages
|
||||
pkg1 = f"package-a-{unique_test_id}"
|
||||
pkg2 = f"package-b-{unique_test_id}"
|
||||
|
||||
integration_client.post(
|
||||
f"/api/v1/project/{project}/packages",
|
||||
json={"name": pkg1, "description": "Package A"},
|
||||
)
|
||||
integration_client.post(
|
||||
f"/api/v1/project/{project}/packages",
|
||||
json={"name": pkg2, "description": "Package B"},
|
||||
)
|
||||
|
||||
# Upload to first package
|
||||
result1 = upload_test_file(integration_client, project, pkg1, content, tag="v1")
|
||||
assert result1["artifact_id"] == expected_hash
|
||||
assert result1["deduplicated"] is False
|
||||
|
||||
# Upload to second package
|
||||
result2 = upload_test_file(integration_client, project, pkg2, content, tag="v1")
|
||||
assert result2["artifact_id"] == expected_hash
|
||||
assert result2["deduplicated"] is True
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_same_file_different_projects_shares_artifact(
|
||||
self, integration_client, unique_test_id
|
||||
):
|
||||
"""Test uploading same file to different projects shares artifact."""
|
||||
content = f"content shared across projects {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Create two projects with packages
|
||||
proj1 = f"project-x-{unique_test_id}"
|
||||
proj2 = f"project-y-{unique_test_id}"
|
||||
pkg_name = "shared-pkg"
|
||||
|
||||
try:
|
||||
# Create projects and packages
|
||||
integration_client.post(
|
||||
"/api/v1/projects",
|
||||
json={"name": proj1, "description": "Project X", "is_public": True},
|
||||
)
|
||||
integration_client.post(
|
||||
"/api/v1/projects",
|
||||
json={"name": proj2, "description": "Project Y", "is_public": True},
|
||||
)
|
||||
integration_client.post(
|
||||
f"/api/v1/project/{proj1}/packages",
|
||||
json={"name": pkg_name, "description": "Package"},
|
||||
)
|
||||
integration_client.post(
|
||||
f"/api/v1/project/{proj2}/packages",
|
||||
json={"name": pkg_name, "description": "Package"},
|
||||
)
|
||||
|
||||
# Upload to first project
|
||||
result1 = upload_test_file(
|
||||
integration_client, proj1, pkg_name, content, tag="v1"
|
||||
)
|
||||
assert result1["artifact_id"] == expected_hash
|
||||
assert result1["deduplicated"] is False
|
||||
|
||||
# Upload to second project
|
||||
result2 = upload_test_file(
|
||||
integration_client, proj2, pkg_name, content, tag="v1"
|
||||
)
|
||||
assert result2["artifact_id"] == expected_hash
|
||||
assert result2["deduplicated"] is True
|
||||
|
||||
finally:
|
||||
# Cleanup
|
||||
integration_client.delete(f"/api/v1/projects/{proj1}")
|
||||
integration_client.delete(f"/api/v1/projects/{proj2}")
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_same_file_different_filenames_shares_artifact(
|
||||
self, integration_client, test_package
|
||||
):
|
||||
"""Test uploading same file with different original filenames shares artifact."""
|
||||
project, package = test_package
|
||||
content = b"content with different filenames"
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload with filename1
|
||||
result1 = upload_test_file(
|
||||
integration_client,
|
||||
project,
|
||||
package,
|
||||
content,
|
||||
filename="file1.bin",
|
||||
tag="v1",
|
||||
)
|
||||
assert result1["artifact_id"] == expected_hash
|
||||
|
||||
# Upload with filename2
|
||||
result2 = upload_test_file(
|
||||
integration_client,
|
||||
project,
|
||||
package,
|
||||
content,
|
||||
filename="file2.bin",
|
||||
tag="v2",
|
||||
)
|
||||
assert result2["artifact_id"] == expected_hash
|
||||
assert result2["deduplicated"] is True
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_same_file_different_tags_shares_artifact(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test uploading same file with different tags shares artifact."""
|
||||
project, package = test_package
|
||||
content = f"content with different tags {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
tags = ["latest", "stable", "v1.0.0", "release"]
|
||||
for i, tag in enumerate(tags):
|
||||
result = upload_test_file(
|
||||
integration_client, project, package, content, tag=tag
|
||||
)
|
||||
assert result["artifact_id"] == expected_hash
|
||||
if i == 0:
|
||||
assert result["deduplicated"] is False
|
||||
else:
|
||||
assert result["deduplicated"] is True
|
||||
|
||||
|
||||
class TestStorageVerification:
|
||||
"""Tests to verify storage behavior after duplicate uploads."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_artifact_table_single_row_after_duplicates(
|
||||
self, integration_client, test_package
|
||||
):
|
||||
"""Test artifact table contains only one row after duplicate uploads."""
|
||||
project, package = test_package
|
||||
content = b"content for single row test"
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload same content multiple times with different tags
|
||||
for tag in ["v1", "v2", "v3"]:
|
||||
upload_test_file(integration_client, project, package, content, tag=tag)
|
||||
|
||||
# Query artifact - should exist and be unique
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.status_code == 200
|
||||
artifact = response.json()
|
||||
assert artifact["id"] == expected_hash
|
||||
assert artifact["ref_count"] == 3
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_upload_table_multiple_rows_for_duplicates(
|
||||
self, integration_client, test_package
|
||||
):
|
||||
"""Test upload table contains multiple rows for duplicate uploads (event tracking)."""
|
||||
project, package = test_package
|
||||
content = b"content for upload tracking test"
|
||||
|
||||
# Upload same content 3 times
|
||||
for tag in ["upload1", "upload2", "upload3"]:
|
||||
upload_test_file(integration_client, project, package, content, tag=tag)
|
||||
|
||||
# Check package info - three tags should exist even though only one unique artifact was stored
|
||||
response = integration_client.get(
|
||||
f"/api/v1/project/{project}/packages/{package}"
|
||||
)
|
||||
assert response.status_code == 200
|
||||
pkg_info = response.json()
|
||||
assert pkg_info["tag_count"] == 3
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_artifact_content_matches_original(self, integration_client, test_package):
|
||||
"""Test artifact content retrieved matches original content exactly."""
|
||||
project, package = test_package
|
||||
original_content = b"exact content verification test data 12345"
|
||||
|
||||
# Upload
|
||||
result = upload_test_file(
|
||||
integration_client, project, package, original_content, tag="verify"
|
||||
)
|
||||
|
||||
# Download and compare
|
||||
download_response = integration_client.get(
|
||||
f"/api/v1/project/{project}/{package}/+/verify", params={"mode": "proxy"}
|
||||
)
|
||||
assert download_response.status_code == 200
|
||||
downloaded_content = download_response.content
|
||||
assert downloaded_content == original_content
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_storage_stats_reflect_deduplication(
|
||||
self, integration_client, test_package
|
||||
):
|
||||
"""Test total storage size matches single artifact size after duplicates."""
|
||||
project, package = test_package
|
||||
content = b"content for storage stats test - should only count once"
|
||||
content_size = len(content)
|
||||
|
||||
# Upload same content 5 times
|
||||
for tag in ["a", "b", "c", "d", "e"]:
|
||||
upload_test_file(integration_client, project, package, content, tag=tag)
|
||||
|
||||
# Check global stats
|
||||
response = integration_client.get("/api/v1/stats")
|
||||
assert response.status_code == 200
|
||||
stats = response.json()
|
||||
|
||||
# Deduplication should show savings
|
||||
assert stats["deduplicated_uploads"] > 0
|
||||
assert stats["storage_saved_bytes"] > 0
|
||||
|
||||
|
||||
class TestConcurrentUploads:
|
||||
"""Tests for concurrent upload handling."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_concurrent_uploads_same_file(self, integration_client, test_package):
|
||||
"""Test concurrent uploads of same file handle deduplication correctly."""
|
||||
project, package = test_package
|
||||
content = b"content for concurrent upload test"
|
||||
expected_hash = compute_sha256(content)
|
||||
num_concurrent = 5
|
||||
|
||||
results = []
|
||||
errors = []
|
||||
|
||||
def upload_worker(tag_suffix):
|
||||
try:
|
||||
# Create a new client for this thread
|
||||
from httpx import Client
|
||||
|
||||
base_url = "http://localhost:8080"
|
||||
with Client(base_url=base_url, timeout=30.0) as client:
|
||||
files = {
|
||||
"file": (
|
||||
f"concurrent-{tag_suffix}.bin",
|
||||
io.BytesIO(content),
|
||||
"application/octet-stream",
|
||||
)
|
||||
}
|
||||
response = client.post(
|
||||
f"/api/v1/project/{project}/{package}/upload",
|
||||
files=files,
|
||||
data={"tag": f"concurrent-{tag_suffix}"},
|
||||
)
|
||||
if response.status_code == 200:
|
||||
results.append(response.json())
|
||||
else:
|
||||
errors.append(f"Status {response.status_code}: {response.text}")
|
||||
except Exception as e:
|
||||
errors.append(str(e))
|
||||
|
||||
# Run concurrent uploads
|
||||
with ThreadPoolExecutor(max_workers=num_concurrent) as executor:
|
||||
futures = [executor.submit(upload_worker, i) for i in range(num_concurrent)]
|
||||
for future in as_completed(futures):
|
||||
pass # Wait for all to complete
|
||||
|
||||
# Verify results
|
||||
assert len(errors) == 0, f"Errors during concurrent uploads: {errors}"
|
||||
assert len(results) == num_concurrent
|
||||
|
||||
# All should have same artifact_id
|
||||
artifact_ids = set(r["artifact_id"] for r in results)
|
||||
assert len(artifact_ids) == 1
|
||||
assert expected_hash in artifact_ids
|
||||
|
||||
# Verify final ref_count
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.status_code == 200
|
||||
assert response.json()["ref_count"] == num_concurrent
|
||||
|
||||
|
||||
class TestDeduplicationAcrossRestarts:
|
||||
"""Tests for deduplication persistence."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_deduplication_persists(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""
|
||||
Test deduplication works with persisted data.
|
||||
|
||||
This test uploads content, then uploads the same content again.
|
||||
Since the database persists, the second upload should detect
|
||||
the existing artifact even without server restart.
|
||||
"""
|
||||
project, package = test_package
|
||||
content = f"persisted content for dedup test {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# First upload
|
||||
result1 = upload_test_file(
|
||||
integration_client, project, package, content, tag="persist1"
|
||||
)
|
||||
assert result1["artifact_id"] == expected_hash
|
||||
assert result1["deduplicated"] is False
|
||||
|
||||
# Second upload (simulating after restart - data is persisted)
|
||||
result2 = upload_test_file(
|
||||
integration_client, project, package, content, tag="persist2"
|
||||
)
|
||||
assert result2["artifact_id"] == expected_hash
|
||||
assert result2["deduplicated"] is True
|
||||
|
||||
# Verify artifact exists with correct ref_count
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.status_code == 200
|
||||
assert response.json()["ref_count"] == 2
|
||||
|
||||
|
||||
class TestS3ObjectVerification:
|
||||
"""Tests to verify S3 storage behavior directly."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_s3_bucket_single_object_after_duplicates(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test S3 bucket contains only one object after duplicate uploads."""
|
||||
project, package = test_package
|
||||
content = f"content for s3 object count test {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload same content multiple times with different tags
|
||||
for tag in ["s3test1", "s3test2", "s3test3"]:
|
||||
upload_test_file(integration_client, project, package, content, tag=tag)
|
||||
|
||||
# Verify only one S3 object exists for this hash
|
||||
s3_objects = list_s3_objects_by_hash(expected_hash)
|
||||
assert len(s3_objects) == 1, (
|
||||
f"Expected 1 S3 object, found {len(s3_objects)}: {s3_objects}"
|
||||
)
|
||||
|
||||
# Verify the object key follows expected pattern
|
||||
expected_key = (
|
||||
f"fruits/{expected_hash[:2]}/{expected_hash[2:4]}/{expected_hash}"
|
||||
)
|
||||
assert s3_objects[0] == expected_key
|
||||
|
||||
|
||||
class TestUploadFailureCleanup:
|
||||
"""Tests for cleanup when uploads fail."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_upload_failure_invalid_project_no_orphaned_s3(
|
||||
self, integration_client, unique_test_id
|
||||
):
|
||||
"""Test upload to non-existent project doesn't leave orphaned S3 objects."""
|
||||
content = f"content for orphan s3 test {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Attempt upload to non-existent project
|
||||
files = {"file": ("test.bin", io.BytesIO(content), "application/octet-stream")}
|
||||
response = integration_client.post(
|
||||
f"/api/v1/project/nonexistent-project-{unique_test_id}/nonexistent-pkg/upload",
|
||||
files=files,
|
||||
data={"tag": "test"},
|
||||
)
|
||||
|
||||
# Upload should fail
|
||||
assert response.status_code == 404
|
||||
|
||||
# Verify no S3 object was created
|
||||
assert not s3_object_exists(expected_hash), (
|
||||
"Orphaned S3 object found after failed upload"
|
||||
)
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_upload_failure_invalid_package_no_orphaned_s3(
|
||||
self, integration_client, test_project, unique_test_id
|
||||
):
|
||||
"""Test upload to non-existent package doesn't leave orphaned S3 objects."""
|
||||
content = f"content for orphan s3 test pkg {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Attempt upload to non-existent package
|
||||
files = {"file": ("test.bin", io.BytesIO(content), "application/octet-stream")}
|
||||
response = integration_client.post(
|
||||
f"/api/v1/project/{test_project}/nonexistent-package-{unique_test_id}/upload",
|
||||
files=files,
|
||||
data={"tag": "test"},
|
||||
)
|
||||
|
||||
# Upload should fail
|
||||
assert response.status_code == 404
|
||||
|
||||
# Verify no S3 object was created
|
||||
assert not s3_object_exists(expected_hash), (
|
||||
"Orphaned S3 object found after failed upload"
|
||||
)
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_upload_failure_empty_file_no_orphaned_s3(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test upload of empty file doesn't leave orphaned S3 objects or DB records."""
|
||||
project, package = test_package
|
||||
content = b"" # Empty content
|
||||
|
||||
# Attempt upload of empty file
|
||||
files = {"file": ("empty.bin", io.BytesIO(content), "application/octet-stream")}
|
||||
response = integration_client.post(
|
||||
f"/api/v1/project/{project}/{package}/upload",
|
||||
files=files,
|
||||
data={"tag": f"empty-{unique_test_id}"},
|
||||
)
|
||||
|
||||
# Upload should fail (empty files are rejected)
|
||||
assert response.status_code in (400, 422), (
|
||||
f"Expected 400/422, got {response.status_code}"
|
||||
)
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_upload_failure_no_orphaned_database_records(
|
||||
self, integration_client, test_project, unique_test_id
|
||||
):
|
||||
"""Test failed upload doesn't leave orphaned database records."""
|
||||
content = f"content for db orphan test {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Attempt upload to non-existent package (should fail before DB insert)
|
||||
files = {"file": ("test.bin", io.BytesIO(content), "application/octet-stream")}
|
||||
response = integration_client.post(
|
||||
f"/api/v1/project/{test_project}/nonexistent-package-{unique_test_id}/upload",
|
||||
files=files,
|
||||
data={"tag": "test"},
|
||||
)
|
||||
|
||||
# Upload should fail
|
||||
assert response.status_code == 404
|
||||
|
||||
# Verify no artifact record was created
|
||||
artifact_response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert artifact_response.status_code == 404, (
|
||||
"Orphaned artifact record found after failed upload"
|
||||
)
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_duplicate_tag_upload_handles_gracefully(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test uploading with duplicate tag is handled without orphaned data."""
|
||||
project, package = test_package
|
||||
content1 = f"content version 1 {unique_test_id}".encode()
|
||||
content2 = f"content version 2 {unique_test_id}".encode()
|
||||
tag = f"duplicate-tag-{unique_test_id}"
|
||||
|
||||
# First upload with tag
|
||||
result1 = upload_test_file(
|
||||
integration_client, project, package, content1, tag=tag
|
||||
)
|
||||
hash1 = result1["artifact_id"]
|
||||
|
||||
# Second upload with same tag (should update the tag to point to new artifact)
|
||||
result2 = upload_test_file(
|
||||
integration_client, project, package, content2, tag=tag
|
||||
)
|
||||
hash2 = result2["artifact_id"]
|
||||
|
||||
# Both artifacts should exist
|
||||
assert integration_client.get(f"/api/v1/artifact/{hash1}").status_code == 200
|
||||
assert integration_client.get(f"/api/v1/artifact/{hash2}").status_code == 200
|
||||
|
||||
# Tag should point to the second artifact
|
||||
tag_response = integration_client.get(
|
||||
f"/api/v1/project/{project}/{package}/tags/{tag}"
|
||||
)
|
||||
assert tag_response.status_code == 200
|
||||
assert tag_response.json()["artifact_id"] == hash2
|
||||
|
||||
|
||||
class TestFileSizeValidation:
|
||||
"""Tests for file size limits and empty file rejection."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_empty_file_rejected(self, integration_client, test_package):
|
||||
"""Test that empty files are rejected with appropriate error."""
|
||||
project, package = test_package
|
||||
|
||||
# Try to upload empty content
|
||||
files = {"file": ("empty.txt", io.BytesIO(b""), "application/octet-stream")}
|
||||
response = integration_client.post(
|
||||
f"/api/v1/project/{project}/{package}/upload",
|
||||
files=files,
|
||||
)
|
||||
|
||||
# Should be rejected (422 from storage layer or validation)
|
||||
assert response.status_code in [422, 400]
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_small_valid_file_accepted(self, integration_client, test_package):
|
||||
"""Test that small (1 byte) files are accepted."""
|
||||
project, package = test_package
|
||||
content = b"X" # Single byte
|
||||
|
||||
result = upload_test_file(
|
||||
integration_client, project, package, content, tag="tiny"
|
||||
)
|
||||
|
||||
assert result["artifact_id"] is not None
|
||||
assert result["size"] == 1
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_file_size_reported_correctly(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test that file size is correctly reported in response."""
|
||||
project, package = test_package
|
||||
content = f"Test content for size check {unique_test_id}".encode()
|
||||
expected_size = len(content)
|
||||
|
||||
result = upload_test_file(
|
||||
integration_client, project, package, content, tag="size-test"
|
||||
)
|
||||
|
||||
assert result["size"] == expected_size
|
||||
|
||||
# Also verify via artifact endpoint
|
||||
artifact_response = integration_client.get(
|
||||
f"/api/v1/artifact/{result['artifact_id']}"
|
||||
)
|
||||
assert artifact_response.json()["size"] == expected_size
|
||||
458 backend/tests/test_ref_count.py Normal file
@@ -0,0 +1,458 @@
|
||||
"""
|
||||
Unit and integration tests for reference counting behavior.
|
||||
|
||||
Tests cover:
|
||||
- ref_count is set correctly for new artifacts
|
||||
- ref_count increments on duplicate uploads
|
||||
- ref_count query correctly identifies existing artifacts
|
||||
- Artifact lookup by SHA256 hash works correctly
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import io
|
||||
from tests.conftest import (
|
||||
compute_sha256,
|
||||
upload_test_file,
|
||||
TEST_CONTENT_HELLO,
|
||||
TEST_HASH_HELLO,
|
||||
)
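# Throughout this module, ref_count is expected to equal the number of tags
# (across all packages and projects) currently pointing at the artifact: it is
# incremented by tagged uploads and tag creation, and decremented by tag,
# package, and project deletions, as the tests below verify.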
|
||||
|
||||
|
||||
class TestRefCountQuery:
|
||||
"""Tests for ref_count querying and artifact lookup."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_artifact_lookup_by_sha256(self, integration_client, test_package):
|
||||
"""Test artifact lookup by SHA256 hash (primary key) works correctly."""
|
||||
project, package = test_package
|
||||
content = b"unique content for lookup test"
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload a file
|
||||
upload_result = upload_test_file(
|
||||
integration_client, project, package, content, tag="v1"
|
||||
)
|
||||
assert upload_result["artifact_id"] == expected_hash
|
||||
|
||||
# Look up artifact by ID (SHA256)
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.status_code == 200
|
||||
|
||||
artifact = response.json()
|
||||
assert artifact["id"] == expected_hash
|
||||
assert artifact["sha256"] == expected_hash
|
||||
assert artifact["size"] == len(content)
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_ref_count_query_identifies_existing_artifact(
|
||||
self, integration_client, test_package
|
||||
):
|
||||
"""Test ref_count query correctly identifies existing artifacts by hash."""
|
||||
project, package = test_package
|
||||
content = b"content for ref count query test"
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload a file with a tag
|
||||
upload_result = upload_test_file(
|
||||
integration_client, project, package, content, tag="v1"
|
||||
)
|
||||
|
||||
# Query artifact and check ref_count
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.status_code == 200
|
||||
|
||||
artifact = response.json()
|
||||
assert artifact["ref_count"] >= 1 # At least 1 from the tag
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_ref_count_set_to_1_for_new_artifact_with_tag(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test ref_count is set to 1 for new artifacts when created with a tag."""
|
||||
project, package = test_package
|
||||
content = f"brand new content for ref count test {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload a new file with a tag
|
||||
upload_result = upload_test_file(
|
||||
integration_client, project, package, content, tag="initial"
|
||||
)
|
||||
|
||||
assert upload_result["artifact_id"] == expected_hash
|
||||
assert upload_result["ref_count"] == 1
|
||||
assert upload_result["deduplicated"] is False
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_ref_count_increments_on_duplicate_upload_with_tag(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test ref_count is incremented when duplicate content is uploaded with a new tag."""
|
||||
project, package = test_package
|
||||
content = f"content that will be uploaded twice {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# First upload with tag
|
||||
result1 = upload_test_file(
|
||||
integration_client, project, package, content, tag="v1"
|
||||
)
|
||||
assert result1["ref_count"] == 1
|
||||
assert result1["deduplicated"] is False
|
||||
|
||||
# Second upload with different tag (same content)
|
||||
result2 = upload_test_file(
|
||||
integration_client, project, package, content, tag="v2"
|
||||
)
|
||||
assert result2["artifact_id"] == expected_hash
|
||||
assert result2["ref_count"] == 2
|
||||
assert result2["deduplicated"] is True
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_ref_count_after_multiple_tags(self, integration_client, test_package):
|
||||
"""Test ref_count correctly reflects number of tags pointing to artifact."""
|
||||
project, package = test_package
|
||||
content = b"content for multiple tag test"
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload with multiple tags
|
||||
tags = ["v1", "v2", "v3", "latest"]
|
||||
for i, tag in enumerate(tags):
|
||||
result = upload_test_file(
|
||||
integration_client, project, package, content, tag=tag
|
||||
)
|
||||
assert result["artifact_id"] == expected_hash
|
||||
assert result["ref_count"] == i + 1
|
||||
|
||||
# Verify final ref_count via artifact endpoint
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.status_code == 200
|
||||
assert response.json()["ref_count"] == len(tags)
|
||||
|
||||
|
||||
class TestRefCountWithDeletion:
|
||||
"""Tests for ref_count behavior when tags are deleted."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_ref_count_decrements_on_tag_delete(self, integration_client, test_package):
|
||||
"""Test ref_count decrements when a tag is deleted."""
|
||||
project, package = test_package
|
||||
content = b"content for delete test"
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload with two tags
|
||||
upload_test_file(integration_client, project, package, content, tag="v1")
|
||||
upload_test_file(integration_client, project, package, content, tag="v2")
|
||||
|
||||
# Verify ref_count is 2
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.json()["ref_count"] == 2
|
||||
|
||||
# Delete one tag
|
||||
delete_response = integration_client.delete(
|
||||
f"/api/v1/project/{project}/{package}/tags/v1"
|
||||
)
|
||||
assert delete_response.status_code == 204
|
||||
|
||||
# Verify ref_count is now 1
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.json()["ref_count"] == 1
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_ref_count_zero_after_all_tags_deleted(
|
||||
self, integration_client, test_package
|
||||
):
|
||||
"""Test ref_count goes to 0 when all tags are deleted."""
|
||||
project, package = test_package
|
||||
content = b"content that will be orphaned"
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload with one tag
|
||||
upload_test_file(integration_client, project, package, content, tag="only-tag")
|
||||
|
||||
# Delete the tag
|
||||
integration_client.delete(f"/api/v1/project/{project}/{package}/tags/only-tag")
|
||||
|
||||
# Verify ref_count is 0
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.json()["ref_count"] == 0
|
||||
|
||||
|
||||
class TestRefCountCascadeDelete:
|
||||
"""Tests for ref_count behavior during cascade deletions."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_ref_count_decrements_on_package_delete(
|
||||
self, integration_client, unique_test_id
|
||||
):
|
||||
"""Test ref_count decrements for all tags when package is deleted."""
|
||||
# Create a project and package manually (not using fixtures to control cleanup)
|
||||
project_name = f"cascade-pkg-{unique_test_id}"
|
||||
package_name = f"test-pkg-{unique_test_id}"
|
||||
|
||||
# Create project
|
||||
response = integration_client.post(
|
||||
"/api/v1/projects",
|
||||
json={
|
||||
"name": project_name,
|
||||
"description": "Test project",
|
||||
"is_public": True,
|
||||
},
|
||||
)
|
||||
assert response.status_code == 200
|
||||
|
||||
# Create package
|
||||
response = integration_client.post(
|
||||
f"/api/v1/project/{project_name}/packages",
|
||||
json={"name": package_name, "description": "Test package"},
|
||||
)
|
||||
assert response.status_code == 200
|
||||
|
||||
# Upload content with multiple tags
|
||||
content = f"cascade delete test {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
upload_test_file(
|
||||
integration_client, project_name, package_name, content, tag="v1"
|
||||
)
|
||||
upload_test_file(
|
||||
integration_client, project_name, package_name, content, tag="v2"
|
||||
)
|
||||
upload_test_file(
|
||||
integration_client, project_name, package_name, content, tag="v3"
|
||||
)
|
||||
|
||||
# Verify ref_count is 3
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.json()["ref_count"] == 3
|
||||
|
||||
# Delete the package (should cascade delete all tags and decrement ref_count)
|
||||
delete_response = integration_client.delete(
|
||||
f"/api/v1/project/{project_name}/packages/{package_name}"
|
||||
)
|
||||
assert delete_response.status_code == 204
|
||||
|
||||
# Verify ref_count is 0 (all tags were deleted)
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.json()["ref_count"] == 0
|
||||
|
||||
# Cleanup: delete the project
|
||||
integration_client.delete(f"/api/v1/projects/{project_name}")
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_ref_count_decrements_on_project_delete(
|
||||
self, integration_client, unique_test_id
|
||||
):
|
||||
"""Test ref_count decrements for all tags in all packages when project is deleted."""
|
||||
# Create a project manually (not using fixtures to control cleanup)
|
||||
project_name = f"cascade-proj-{unique_test_id}"
|
||||
package1_name = f"pkg1-{unique_test_id}"
|
||||
package2_name = f"pkg2-{unique_test_id}"
|
||||
|
||||
# Create project
|
||||
response = integration_client.post(
|
||||
"/api/v1/projects",
|
||||
json={
|
||||
"name": project_name,
|
||||
"description": "Test project",
|
||||
"is_public": True,
|
||||
},
|
||||
)
|
||||
assert response.status_code == 200
|
||||
|
||||
# Create two packages
|
||||
for pkg_name in [package1_name, package2_name]:
|
||||
response = integration_client.post(
|
||||
f"/api/v1/project/{project_name}/packages",
|
||||
json={"name": pkg_name, "description": "Test package"},
|
||||
)
|
||||
assert response.status_code == 200
|
||||
|
||||
# Upload same content with tags in both packages
|
||||
content = f"project cascade test {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
upload_test_file(
|
||||
integration_client, project_name, package1_name, content, tag="v1"
|
||||
)
|
||||
upload_test_file(
|
||||
integration_client, project_name, package1_name, content, tag="v2"
|
||||
)
|
||||
upload_test_file(
|
||||
integration_client, project_name, package2_name, content, tag="latest"
|
||||
)
|
||||
upload_test_file(
|
||||
integration_client, project_name, package2_name, content, tag="stable"
|
||||
)
|
||||
|
||||
# Verify ref_count is 4 (2 tags in each of 2 packages)
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.json()["ref_count"] == 4
|
||||
|
||||
# Delete the project (should cascade delete all packages, tags, and decrement ref_count)
|
||||
delete_response = integration_client.delete(f"/api/v1/projects/{project_name}")
|
||||
assert delete_response.status_code == 204
|
||||
|
||||
# Verify ref_count is 0
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.json()["ref_count"] == 0
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_shared_artifact_ref_count_partial_decrement(
|
||||
self, integration_client, unique_test_id
|
||||
):
|
||||
"""Test ref_count correctly decrements when artifact is shared across packages."""
|
||||
# Create project with two packages
|
||||
project_name = f"shared-artifact-{unique_test_id}"
|
||||
package1_name = f"pkg1-{unique_test_id}"
|
||||
package2_name = f"pkg2-{unique_test_id}"
|
||||
|
||||
# Create project
|
||||
response = integration_client.post(
|
||||
"/api/v1/projects",
|
||||
json={
|
||||
"name": project_name,
|
||||
"description": "Test project",
|
||||
"is_public": True,
|
||||
},
|
||||
)
|
||||
assert response.status_code == 200
|
||||
|
||||
# Create two packages
|
||||
for pkg_name in [package1_name, package2_name]:
|
||||
response = integration_client.post(
|
||||
f"/api/v1/project/{project_name}/packages",
|
||||
json={"name": pkg_name, "description": "Test package"},
|
||||
)
|
||||
assert response.status_code == 200
|
||||
|
||||
# Upload same content to both packages
|
||||
content = f"shared artifact {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
upload_test_file(
|
||||
integration_client, project_name, package1_name, content, tag="v1"
|
||||
)
|
||||
upload_test_file(
|
||||
integration_client, project_name, package2_name, content, tag="v1"
|
||||
)
|
||||
|
||||
# Verify ref_count is 2
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.json()["ref_count"] == 2
|
||||
|
||||
# Delete only package1 (package2 still references the artifact)
|
||||
delete_response = integration_client.delete(
|
||||
f"/api/v1/project/{project_name}/packages/{package1_name}"
|
||||
)
|
||||
assert delete_response.status_code == 204
|
||||
|
||||
# Verify ref_count is 1 (only package2's tag remains)
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.json()["ref_count"] == 1
|
||||
|
||||
# Cleanup
|
||||
integration_client.delete(f"/api/v1/projects/{project_name}")
|
||||
|
||||
|
||||
class TestRefCountTagUpdate:
|
||||
"""Tests for ref_count behavior when tags are updated to point to different artifacts."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_ref_count_adjusts_on_tag_update(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test ref_count adjusts when a tag is updated to point to a different artifact."""
|
||||
project, package = test_package
|
||||
|
||||
# Upload two different artifacts
|
||||
content1 = f"artifact one {unique_test_id}".encode()
|
||||
content2 = f"artifact two {unique_test_id}".encode()
|
||||
hash1 = compute_sha256(content1)
|
||||
hash2 = compute_sha256(content2)
|
||||
|
||||
# Upload first artifact with tag "latest"
|
||||
upload_test_file(integration_client, project, package, content1, tag="latest")
|
||||
|
||||
# Verify first artifact has ref_count 1
|
||||
response = integration_client.get(f"/api/v1/artifact/{hash1}")
|
||||
assert response.json()["ref_count"] == 1
|
||||
|
||||
# Upload second artifact with different tag
|
||||
upload_test_file(integration_client, project, package, content2, tag="stable")
|
||||
|
||||
# Now update "latest" tag to point to second artifact
|
||||
# This is done by uploading the same content with the same tag
|
||||
upload_test_file(integration_client, project, package, content2, tag="latest")
|
||||
|
||||
# Verify first artifact ref_count decreased to 0 (tag moved away)
|
||||
response = integration_client.get(f"/api/v1/artifact/{hash1}")
|
||||
assert response.json()["ref_count"] == 0
|
||||
|
||||
# Verify second artifact ref_count increased to 2 (stable + latest)
|
||||
response = integration_client.get(f"/api/v1/artifact/{hash2}")
|
||||
assert response.json()["ref_count"] == 2
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_ref_count_unchanged_when_tag_same_artifact(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test ref_count doesn't change when tag is 'updated' to same artifact."""
|
||||
project, package = test_package
|
||||
|
||||
content = f"same artifact {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload with tag
|
||||
upload_test_file(integration_client, project, package, content, tag="v1")
|
||||
|
||||
# Verify ref_count is 1
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.json()["ref_count"] == 1
|
||||
|
||||
# Upload same content with same tag (no-op)
|
||||
upload_test_file(integration_client, project, package, content, tag="v1")
|
||||
|
||||
# Verify ref_count is still 1 (no double-counting)
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.json()["ref_count"] == 1
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_tag_via_post_endpoint_increments_ref_count(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test creating tag via POST /tags endpoint increments ref_count."""
|
||||
project, package = test_package
|
||||
|
||||
content = f"tag endpoint test {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload artifact without tag
|
||||
result = upload_test_file(
|
||||
integration_client, project, package, content, filename="test.bin", tag=None
|
||||
)
|
||||
artifact_id = result["artifact_id"]
|
||||
|
||||
# Verify ref_count is 0 (no tags yet)
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.json()["ref_count"] == 0
|
||||
|
||||
# Create tag via POST endpoint
|
||||
tag_response = integration_client.post(
|
||||
f"/api/v1/project/{project}/{package}/tags",
|
||||
json={"name": "v1.0.0", "artifact_id": artifact_id},
|
||||
)
|
||||
assert tag_response.status_code == 200
|
||||
|
||||
# Verify ref_count is now 1
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.json()["ref_count"] == 1
|
||||
|
||||
# Create another tag via POST endpoint
|
||||
tag_response = integration_client.post(
|
||||
f"/api/v1/project/{project}/{package}/tags",
|
||||
json={"name": "latest", "artifact_id": artifact_id},
|
||||
)
|
||||
assert tag_response.status_code == 200
|
||||
|
||||
# Verify ref_count is now 2
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}")
|
||||
assert response.json()["ref_count"] == 2
|
||||
488
backend/tests/test_stats_endpoints.py
Normal file
488
backend/tests/test_stats_endpoints.py
Normal file
@@ -0,0 +1,488 @@
|
||||
"""
|
||||
Integration tests for statistics endpoints.
|
||||
|
||||
Tests cover:
|
||||
- Global stats endpoint
|
||||
- Deduplication stats endpoint
|
||||
- Cross-project deduplication
|
||||
- Timeline stats
|
||||
- Export and report endpoints
|
||||
- Package and artifact stats
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from tests.conftest import compute_sha256, upload_test_file
|
||||
|
||||
|
||||
class TestGlobalStats:
|
||||
"""Tests for GET /api/v1/stats endpoint."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_stats_returns_valid_response(self, integration_client):
|
||||
"""Test stats endpoint returns expected fields."""
|
||||
response = integration_client.get("/api/v1/stats")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
# Check all required fields exist
|
||||
assert "total_artifacts" in data
|
||||
assert "total_size_bytes" in data
|
||||
assert "unique_artifacts" in data
|
||||
assert "orphaned_artifacts" in data
|
||||
assert "orphaned_size_bytes" in data
|
||||
assert "total_uploads" in data
|
||||
assert "deduplicated_uploads" in data
|
||||
assert "deduplication_ratio" in data
|
||||
assert "storage_saved_bytes" in data
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_stats_values_are_non_negative(self, integration_client):
|
||||
"""Test all stat values are non-negative."""
|
||||
response = integration_client.get("/api/v1/stats")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert data["total_artifacts"] >= 0
|
||||
assert data["total_size_bytes"] >= 0
|
||||
assert data["unique_artifacts"] >= 0
|
||||
assert data["orphaned_artifacts"] >= 0
|
||||
assert data["total_uploads"] >= 0
|
||||
assert data["deduplicated_uploads"] >= 0
|
||||
assert data["deduplication_ratio"] >= 0
|
||||
assert data["storage_saved_bytes"] >= 0
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_stats_update_after_upload(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test stats update after uploading an artifact."""
|
||||
project, package = test_package
|
||||
|
||||
# Get initial stats
|
||||
initial_response = integration_client.get("/api/v1/stats")
|
||||
initial_stats = initial_response.json()
|
||||
|
||||
# Upload a new file
|
||||
content = f"stats test content {unique_test_id}".encode()
|
||||
upload_test_file(
|
||||
integration_client, project, package, content, tag=f"stats-{unique_test_id}"
|
||||
)
|
||||
|
||||
# Get updated stats
|
||||
updated_response = integration_client.get("/api/v1/stats")
|
||||
updated_stats = updated_response.json()
|
||||
|
||||
# Verify stats increased
|
||||
assert updated_stats["total_uploads"] >= initial_stats["total_uploads"]
|
||||
|
||||
|
||||
class TestDeduplicationStats:
|
||||
"""Tests for GET /api/v1/stats/deduplication endpoint."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_dedup_stats_returns_valid_response(self, integration_client):
|
||||
"""Test deduplication stats returns expected fields."""
|
||||
response = integration_client.get("/api/v1/stats/deduplication")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert "total_logical_bytes" in data
|
||||
assert "total_physical_bytes" in data
|
||||
assert "bytes_saved" in data
|
||||
assert "savings_percentage" in data
|
||||
assert "total_uploads" in data
|
||||
assert "unique_artifacts" in data
|
||||
assert "duplicate_uploads" in data
|
||||
assert "average_ref_count" in data
|
||||
assert "max_ref_count" in data
|
||||
assert "most_referenced_artifacts" in data
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_most_referenced_artifacts_format(self, integration_client):
|
||||
"""Test most_referenced_artifacts has correct structure."""
|
||||
response = integration_client.get("/api/v1/stats/deduplication")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
artifacts = data["most_referenced_artifacts"]
|
||||
assert isinstance(artifacts, list)
|
||||
|
||||
if len(artifacts) > 0:
|
||||
artifact = artifacts[0]
|
||||
assert "artifact_id" in artifact
|
||||
assert "ref_count" in artifact
|
||||
assert "size" in artifact
|
||||
assert "storage_saved" in artifact
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_dedup_stats_with_top_n_param(self, integration_client):
|
||||
"""Test deduplication stats respects top_n parameter."""
|
||||
response = integration_client.get("/api/v1/stats/deduplication?top_n=3")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert len(data["most_referenced_artifacts"]) <= 3
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_savings_percentage_valid_range(self, integration_client):
|
||||
"""Test savings percentage is between 0 and 100."""
|
||||
response = integration_client.get("/api/v1/stats/deduplication")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert 0 <= data["savings_percentage"] <= 100
|
||||
|
||||
|
||||
class TestCrossProjectStats:
|
||||
"""Tests for GET /api/v1/stats/cross-project endpoint."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_cross_project_returns_valid_response(self, integration_client):
|
||||
"""Test cross-project stats returns expected fields."""
|
||||
response = integration_client.get("/api/v1/stats/cross-project")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert "shared_artifacts_count" in data
|
||||
assert "total_cross_project_savings" in data
|
||||
assert "shared_artifacts" in data
|
||||
assert isinstance(data["shared_artifacts"], list)
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_cross_project_respects_limit(self, integration_client):
|
||||
"""Test cross-project stats respects limit parameter."""
|
||||
response = integration_client.get("/api/v1/stats/cross-project?limit=5")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert len(data["shared_artifacts"]) <= 5
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_cross_project_detects_shared_artifacts(
|
||||
self, integration_client, unique_test_id
|
||||
):
|
||||
"""Test cross-project deduplication is detected."""
|
||||
content = f"shared across projects {unique_test_id}".encode()
|
||||
|
||||
# Create two projects
|
||||
proj1 = f"cross-proj-a-{unique_test_id}"
|
||||
proj2 = f"cross-proj-b-{unique_test_id}"
|
||||
|
||||
try:
|
||||
# Create projects and packages
|
||||
integration_client.post(
|
||||
"/api/v1/projects",
|
||||
json={"name": proj1, "description": "Test", "is_public": True},
|
||||
)
|
||||
integration_client.post(
|
||||
"/api/v1/projects",
|
||||
json={"name": proj2, "description": "Test", "is_public": True},
|
||||
)
|
||||
integration_client.post(
|
||||
f"/api/v1/project/{proj1}/packages",
|
||||
json={"name": "pkg", "description": "Test"},
|
||||
)
|
||||
integration_client.post(
|
||||
f"/api/v1/project/{proj2}/packages",
|
||||
json={"name": "pkg", "description": "Test"},
|
||||
)
|
||||
|
||||
# Upload same content to both projects
|
||||
upload_test_file(integration_client, proj1, "pkg", content, tag="v1")
|
||||
upload_test_file(integration_client, proj2, "pkg", content, tag="v1")
|
||||
|
||||
# Check cross-project stats
|
||||
response = integration_client.get("/api/v1/stats/cross-project")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert data["shared_artifacts_count"] >= 1
|
||||
|
||||
finally:
|
||||
# Cleanup
|
||||
integration_client.delete(f"/api/v1/projects/{proj1}")
|
||||
integration_client.delete(f"/api/v1/projects/{proj2}")
|
||||
|
||||
|
||||
class TestTimelineStats:
|
||||
"""Tests for GET /api/v1/stats/timeline endpoint."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_timeline_returns_valid_response(self, integration_client):
|
||||
"""Test timeline stats returns expected fields."""
|
||||
response = integration_client.get("/api/v1/stats/timeline")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert "period" in data
|
||||
assert "start_date" in data
|
||||
assert "end_date" in data
|
||||
assert "data_points" in data
|
||||
assert isinstance(data["data_points"], list)
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_timeline_daily_period(self, integration_client):
|
||||
"""Test timeline with daily period."""
|
||||
response = integration_client.get("/api/v1/stats/timeline?period=daily")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert data["period"] == "daily"
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_timeline_weekly_period(self, integration_client):
|
||||
"""Test timeline with weekly period."""
|
||||
response = integration_client.get("/api/v1/stats/timeline?period=weekly")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert data["period"] == "weekly"
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_timeline_monthly_period(self, integration_client):
|
||||
"""Test timeline with monthly period."""
|
||||
response = integration_client.get("/api/v1/stats/timeline?period=monthly")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert data["period"] == "monthly"
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_timeline_invalid_period_rejected(self, integration_client):
|
||||
"""Test timeline rejects invalid period."""
|
||||
response = integration_client.get("/api/v1/stats/timeline?period=invalid")
|
||||
assert response.status_code == 422
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_timeline_data_point_structure(self, integration_client):
|
||||
"""Test timeline data points have correct structure."""
|
||||
response = integration_client.get("/api/v1/stats/timeline")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
if len(data["data_points"]) > 0:
|
||||
point = data["data_points"][0]
|
||||
assert "date" in point
|
||||
assert "total_uploads" in point
|
||||
assert "unique_artifacts" in point
|
||||
assert "duplicated_uploads" in point
|
||||
assert "bytes_saved" in point
|
||||
|
||||
|
||||
class TestExportEndpoint:
|
||||
"""Tests for GET /api/v1/stats/export endpoint."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_export_json_format(self, integration_client):
|
||||
"""Test export with JSON format."""
|
||||
response = integration_client.get("/api/v1/stats/export?format=json")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert "total_artifacts" in data
|
||||
assert "generated_at" in data
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_export_csv_format(self, integration_client):
|
||||
"""Test export with CSV format."""
|
||||
response = integration_client.get("/api/v1/stats/export?format=csv")
|
||||
assert response.status_code == 200
|
||||
assert "text/csv" in response.headers.get("content-type", "")
|
||||
|
||||
content = response.text
|
||||
assert "Metric,Value" in content
|
||||
assert "total_artifacts" in content
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_export_invalid_format_rejected(self, integration_client):
|
||||
"""Test export rejects invalid format."""
|
||||
response = integration_client.get("/api/v1/stats/export?format=xml")
|
||||
assert response.status_code == 422
|
||||
|
||||
|
||||
class TestReportEndpoint:
|
||||
"""Tests for GET /api/v1/stats/report endpoint."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_report_markdown_format(self, integration_client):
|
||||
"""Test report with markdown format."""
|
||||
response = integration_client.get("/api/v1/stats/report?format=markdown")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert data["format"] == "markdown"
|
||||
assert "generated_at" in data
|
||||
assert "content" in data
|
||||
assert "# Orchard Storage Report" in data["content"]
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_report_json_format(self, integration_client):
|
||||
"""Test report with JSON format."""
|
||||
response = integration_client.get("/api/v1/stats/report?format=json")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert data["format"] == "json"
|
||||
assert "content" in data
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_report_contains_sections(self, integration_client):
|
||||
"""Test markdown report contains expected sections."""
|
||||
response = integration_client.get("/api/v1/stats/report?format=markdown")
|
||||
assert response.status_code == 200
|
||||
|
||||
content = response.json()["content"]
|
||||
assert "## Overview" in content
|
||||
assert "## Storage" in content
|
||||
assert "## Uploads" in content
|
||||
|
||||
|
||||
class TestProjectStats:
|
||||
"""Tests for GET /api/v1/projects/:project/stats endpoint."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_project_stats_returns_valid_response(
|
||||
self, integration_client, test_project
|
||||
):
|
||||
"""Test project stats returns expected fields."""
|
||||
response = integration_client.get(f"/api/v1/projects/{test_project}/stats")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert "project_id" in data
|
||||
assert "project_name" in data
|
||||
assert "package_count" in data
|
||||
assert "tag_count" in data
|
||||
assert "artifact_count" in data
|
||||
assert "total_size_bytes" in data
|
||||
assert "upload_count" in data
|
||||
assert "deduplicated_uploads" in data
|
||||
assert "storage_saved_bytes" in data
|
||||
assert "deduplication_ratio" in data
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_project_stats_not_found(self, integration_client):
|
||||
"""Test project stats returns 404 for non-existent project."""
|
||||
response = integration_client.get("/api/v1/projects/nonexistent-project/stats")
|
||||
assert response.status_code == 404
|
||||
|
||||
|
||||
class TestPackageStats:
|
||||
"""Tests for GET /api/v1/project/:project/packages/:package/stats endpoint."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_package_stats_returns_valid_response(
|
||||
self, integration_client, test_package
|
||||
):
|
||||
"""Test package stats returns expected fields."""
|
||||
project, package = test_package
|
||||
response = integration_client.get(
|
||||
f"/api/v1/project/{project}/packages/{package}/stats"
|
||||
)
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert "package_id" in data
|
||||
assert "package_name" in data
|
||||
assert "project_name" in data
|
||||
assert "tag_count" in data
|
||||
assert "artifact_count" in data
|
||||
assert "total_size_bytes" in data
|
||||
assert "upload_count" in data
|
||||
assert "deduplicated_uploads" in data
|
||||
assert "storage_saved_bytes" in data
|
||||
assert "deduplication_ratio" in data
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_package_stats_not_found(self, integration_client, test_project):
|
||||
"""Test package stats returns 404 for non-existent package."""
|
||||
response = integration_client.get(
|
||||
f"/api/v1/project/{test_project}/packages/nonexistent-package/stats"
|
||||
)
|
||||
assert response.status_code == 404
|
||||
|
||||
|
||||
class TestArtifactStats:
|
||||
"""Tests for GET /api/v1/artifact/:id/stats endpoint."""
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_artifact_stats_returns_valid_response(
|
||||
self, integration_client, test_package, unique_test_id
|
||||
):
|
||||
"""Test artifact stats returns expected fields."""
|
||||
project, package = test_package
|
||||
content = f"artifact stats test {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
# Upload artifact
|
||||
upload_test_file(
|
||||
integration_client, project, package, content, tag=f"art-{unique_test_id}"
|
||||
)
|
||||
|
||||
# Get artifact stats
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}/stats")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert "artifact_id" in data
|
||||
assert "sha256" in data
|
||||
assert "size" in data
|
||||
assert "ref_count" in data
|
||||
assert "storage_savings" in data
|
||||
assert "tags" in data
|
||||
assert "projects" in data
|
||||
assert "packages" in data
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_artifact_stats_not_found(self, integration_client):
|
||||
"""Test artifact stats returns 404 for non-existent artifact."""
|
||||
fake_hash = "0" * 64
|
||||
response = integration_client.get(f"/api/v1/artifact/{fake_hash}/stats")
|
||||
assert response.status_code == 404
|
||||
|
||||
@pytest.mark.integration
|
||||
def test_artifact_stats_shows_correct_projects(
|
||||
self, integration_client, unique_test_id
|
||||
):
|
||||
"""Test artifact stats shows all projects using the artifact."""
|
||||
content = f"multi-project artifact {unique_test_id}".encode()
|
||||
expected_hash = compute_sha256(content)
|
||||
|
||||
proj1 = f"art-stats-a-{unique_test_id}"
|
||||
proj2 = f"art-stats-b-{unique_test_id}"
|
||||
|
||||
try:
|
||||
# Create projects and packages
|
||||
integration_client.post(
|
||||
"/api/v1/projects",
|
||||
json={"name": proj1, "description": "Test", "is_public": True},
|
||||
)
|
||||
integration_client.post(
|
||||
"/api/v1/projects",
|
||||
json={"name": proj2, "description": "Test", "is_public": True},
|
||||
)
|
||||
integration_client.post(
|
||||
f"/api/v1/project/{proj1}/packages",
|
||||
json={"name": "pkg", "description": "Test"},
|
||||
)
|
||||
integration_client.post(
|
||||
f"/api/v1/project/{proj2}/packages",
|
||||
json={"name": "pkg", "description": "Test"},
|
||||
)
|
||||
|
||||
# Upload same content to both projects
|
||||
upload_test_file(integration_client, proj1, "pkg", content, tag="v1")
|
||||
upload_test_file(integration_client, proj2, "pkg", content, tag="v1")
|
||||
|
||||
# Check artifact stats
|
||||
response = integration_client.get(f"/api/v1/artifact/{expected_hash}/stats")
|
||||
assert response.status_code == 200
|
||||
|
||||
data = response.json()
|
||||
assert len(data["projects"]) == 2
|
||||
assert proj1 in data["projects"]
|
||||
assert proj2 in data["projects"]
|
||||
|
||||
finally:
|
||||
integration_client.delete(f"/api/v1/projects/{proj1}")
|
||||
integration_client.delete(f"/api/v1/projects/{proj2}")
|
||||
575
docs/design/deduplication-design.md
Normal file
575
docs/design/deduplication-design.md
Normal file
@@ -0,0 +1,575 @@
|
||||
# Deduplication Design Document
|
||||
|
||||
This document defines Orchard's content-addressable storage and deduplication approach using SHA256 hashes.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. [Overview](#overview)
|
||||
2. [Hash Algorithm Selection](#hash-algorithm-selection)
|
||||
3. [Content-Addressable Storage Model](#content-addressable-storage-model)
|
||||
4. [S3 Key Derivation](#s3-key-derivation)
|
||||
5. [Duplicate Detection Strategy](#duplicate-detection-strategy)
|
||||
6. [Reference Counting Lifecycle](#reference-counting-lifecycle)
|
||||
7. [Edge Cases and Error Handling](#edge-cases-and-error-handling)
|
||||
8. [Collision Handling](#collision-handling)
|
||||
9. [Performance Considerations](#performance-considerations)
|
||||
10. [Operations Runbook](#operations-runbook)
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
Orchard uses **whole-file deduplication** based on content hashing. When a file is uploaded:
|
||||
|
||||
1. The SHA256 hash of the entire file content is computed
|
||||
2. The hash becomes the artifact's primary identifier
|
||||
3. If a file with the same hash already exists, no duplicate is stored
|
||||
4. Multiple tags/references can point to the same artifact
|
||||
|
||||
**Scope:** Orchard implements whole-file deduplication only. Chunk-level or block-level deduplication is out of scope for MVP.
|
||||
|
||||
---
|
||||
|
||||
## Hash Algorithm Selection
|
||||
|
||||
### Decision: SHA256
|
||||
|
||||
| Criteria | SHA256 | SHA1 | MD5 | Blake3 |
|
||||
|----------|--------|------|-----|--------|
|
||||
| Security | Strong (256-bit) | Weak (broken) | Weak (broken) | Strong |
|
||||
| Speed | ~400 MB/s | ~600 MB/s | ~800 MB/s | ~1500 MB/s |
|
||||
| Collision Resistance | 2^128 | Broken | Broken | 2^128 |
|
||||
| Industry Adoption | Universal | Legacy | Legacy | Emerging |
|
||||
| Tool Ecosystem | Excellent | Good | Good | Growing |
|
||||
|
||||
### Rationale
|
||||
|
||||
1. **Security**: SHA256 has no known practical collision attacks. SHA1 and MD5 are cryptographically broken.
|
||||
|
||||
2. **Collision Resistance**: With 256-bit output, the probability that any two specific files collide is 2^-256; even reaching a 50% chance of *any* collision would require approximately 2^128 unique files.
|
||||
|
||||
3. **Industry Standard**: SHA256 is the de facto standard for content-addressable storage (Git, Docker, npm, etc.).
|
||||
|
||||
4. **Performance**: While Blake3 is faster, SHA256 throughput (~400 MB/s) exceeds typical network bandwidth for uploads. The bottleneck is I/O, not hashing.
|
||||
|
||||
5. **Tooling**: Universal support in all languages, operating systems, and verification tools.
|
||||
|
||||
### Migration Path
|
||||
|
||||
If a future algorithm change is needed (e.g., SHA3 or Blake3):
|
||||
|
||||
1. **Database**: Add `hash_algorithm` column to artifacts table (default: 'sha256')
|
||||
2. **S3 Keys**: New algorithm uses different prefix (e.g., `fruits-sha3/` vs `fruits/`)
|
||||
3. **API**: Accept algorithm hint in upload, return algorithm in responses
|
||||
4. **Migration**: Background job to re-hash existing artifacts if needed
|
||||
|
||||
**Current Implementation**: Single algorithm (SHA256), no algorithm versioning required for MVP.
|
||||
|
||||
---
|
||||
|
||||
## Content-Addressable Storage Model
|
||||
|
||||
### Core Principles
|
||||
|
||||
1. **Identity = Content**: The artifact ID IS the SHA256 hash of its content
|
||||
2. **Immutability**: Content cannot change after storage (same hash = same content)
|
||||
3. **Deduplication**: Same content uploaded twice results in single storage
|
||||
4. **Metadata Independence**: Files with identical content but different names/types are deduplicated
|
||||
|
||||
### Data Model
|
||||
|
||||
```
|
||||
Artifact {
|
||||
id: VARCHAR(64) PRIMARY KEY -- SHA256 hash (lowercase hex)
|
||||
size: BIGINT -- File size in bytes
|
||||
ref_count: INTEGER -- Number of references
|
||||
s3_key: VARCHAR(1024) -- S3 storage path
|
||||
checksum_md5: VARCHAR(32) -- Secondary checksum
|
||||
checksum_sha1: VARCHAR(40) -- Secondary checksum
|
||||
...
|
||||
}
|
||||
|
||||
Tag {
|
||||
id: UUID PRIMARY KEY
|
||||
name: VARCHAR(255)
|
||||
package_id: UUID FK
|
||||
artifact_id: VARCHAR(64) FK -- Points to Artifact.id (SHA256)
|
||||
}
|
||||
```
|
||||
|
||||
### Hash Format
|
||||
|
||||
- Algorithm: SHA256
|
||||
- Output: 64 lowercase hexadecimal characters
|
||||
- Example: `dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f`
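A minimal sketch of how an artifact ID is derived (standard library only; reading the content into memory here is for illustration, not how uploads are actually processed):

```python
import hashlib

def artifact_id(data: bytes) -> str:
    """Artifact ID = SHA256 of the content, as 64 lowercase hex characters."""
    return hashlib.sha256(data).hexdigest()

print(len(artifact_id(b"example content")))  # 64
```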
|
||||
|
||||
---
|
||||
|
||||
## S3 Key Derivation
|
||||
|
||||
### Key Structure
|
||||
|
||||
```
|
||||
fruits/{hash[0:2]}/{hash[2:4]}/{full_hash}
|
||||
```
|
||||
|
||||
Example for hash `dffd6021bb2bd5b0...`:
|
||||
```
|
||||
fruits/df/fd/dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f
|
||||
```
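The derivation itself is a one-liner; a sketch (the function name is illustrative):

```python
def derive_s3_key(sha256_hex: str, prefix: str = "fruits") -> str:
    """Shard by the first two hex pairs of the hash."""
    return f"{prefix}/{sha256_hex[0:2]}/{sha256_hex[2:4]}/{sha256_hex}"

# derive_s3_key("dffd6021bb2bd5b0...")
# -> "fruits/df/fd/dffd6021bb2bd5b0..."
```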
|
||||
|
||||
### Rationale for Prefix Sharding
|
||||
|
||||
1. **S3 Performance**: S3 partitions by key prefix. Distributing across prefixes improves throughput.
|
||||
|
||||
2. **Filesystem Compatibility**: When using filesystem-backed storage, avoids single directory with millions of files.
|
||||
|
||||
3. **Distribution**: Two 2-character hex prefixes (256 combinations per level) spread keys across 65,536 (256 x 256) prefix buckets.
|
||||
|
||||
### Bucket Distribution Analysis
|
||||
|
||||
Assuming uniformly distributed SHA256 hashes:
|
||||
|
||||
| Artifacts | Files per Prefix (avg) | Max per Prefix (99.9%) |
|
||||
|-----------|------------------------|------------------------|
|
||||
| 100,000 | 1.5 | 10 |
|
||||
| 1,000,000 | 15 | 50 |
|
||||
| 10,000,000 | 152 | 250 |
|
||||
| 100,000,000 | 1,525 | 2,000 |
|
||||
|
||||
The two-level prefix provides excellent distribution up to hundreds of millions of artifacts.
|
||||
|
||||
---
|
||||
|
||||
## Duplicate Detection Strategy
|
||||
|
||||
### Upload Flow
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ UPLOAD REQUEST │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ 1. VALIDATE: Check file size limits (min/max) │
|
||||
│ - Empty files (0 bytes) → Reject with 422 │
|
||||
│ - Exceeds max_file_size → Reject with 413 │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ 2. COMPUTE HASH: Stream file through SHA256/MD5/SHA1 │
|
||||
│ - Use 8MB chunks for memory efficiency │
|
||||
│ - Single pass for all three hashes │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ 3. DERIVE S3 KEY: fruits/{hash[0:2]}/{hash[2:4]}/{hash} │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ 4. CHECK EXISTENCE: HEAD request to S3 for derived key │
|
||||
│ - Retry up to 3 times on transient failures │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
┌───────────────┴───────────────┐
|
||||
▼ ▼
|
||||
┌─────────────────────────┐ ┌─────────────────────────────────┐
|
||||
│ EXISTS: Deduplicated │ │ NOT EXISTS: Upload to S3 │
|
||||
│ - Verify size matches │ │ - PUT object (or multipart) │
|
||||
│ - Skip S3 upload │ │ - Abort on failure │
|
||||
│ - Log saved bytes │ └─────────────────────────────────┘
|
||||
└─────────────────────────┘ │
|
||||
│ │
|
||||
└───────────────┬───────────────┘
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ 5. DATABASE: Create/update artifact record │
|
||||
│ - Use row locking to prevent race conditions │
|
||||
│ - ref_count managed by SQL triggers │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ 6. CREATE TAG: If tag provided, create/update tag │
|
||||
│ - SQL trigger increments ref_count │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
```
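A condensed sketch of steps 3-5 (key derivation, existence check, conditional upload), assuming a boto3 S3 client; retries, the database step, and tag creation are elided:

```python
import botocore.exceptions

def store_if_absent(s3, bucket: str, sha256_hex: str, body: bytes) -> bool:
    """Return True if the object was uploaded, False if it was deduplicated."""
    key = f"fruits/{sha256_hex[0:2]}/{sha256_hex[2:4]}/{sha256_hex}"
    try:
        head = s3.head_object(Bucket=bucket, Key=key)
        # Duplicate: sanity-check the size (collision detection), skip the upload.
        if head["ContentLength"] != len(body):
            raise RuntimeError(f"Possible hash collision for {sha256_hex}")  # stand-in for HashCollisionError
        return False
    except botocore.exceptions.ClientError as exc:
        if exc.response["Error"]["Code"] not in ("404", "NoSuchKey"):
            raise  # transient storage error: surface to the caller
    s3.put_object(Bucket=bucket, Key=key, Body=body)
    return True
```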
|
||||
|
||||
### Hash Computation
|
||||
|
||||
**Memory Requirements:**
|
||||
- Chunk size: 8MB (`HASH_CHUNK_SIZE`)
|
||||
- Working memory: ~25MB (8MB chunk + hash states)
|
||||
- Independent of file size (streaming)
|
||||
|
||||
**Throughput:**
|
||||
- SHA256 alone: ~400 MB/s on modern CPU
|
||||
- With MD5 + SHA1: ~300 MB/s (parallel computation)
|
||||
- Typical bottleneck: Network I/O, not CPU
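A sketch of the single-pass, chunked digest computation described above (the function name is illustrative; the chunk size follows the text):

```python
import hashlib

HASH_CHUNK_SIZE = 8 * 1024 * 1024  # 8MB, per the design above

def compute_digests(fileobj):
    """Stream a file-like object once; return (sha256, md5, sha1, size)."""
    sha256, md5, sha1 = hashlib.sha256(), hashlib.md5(), hashlib.sha1()
    size = 0
    while True:
        chunk = fileobj.read(HASH_CHUNK_SIZE)
        if not chunk:
            break
        size += len(chunk)
        sha256.update(chunk)
        md5.update(chunk)
        sha1.update(chunk)
    return sha256.hexdigest(), md5.hexdigest(), sha1.hexdigest(), size
```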
|
||||
|
||||
### Multipart Upload Threshold
|
||||
|
||||
Files larger than 100MB use S3 multipart upload:
|
||||
- First pass: Stream to compute hashes
|
||||
- If not duplicate: Seek to start, upload in 10MB parts
|
||||
- On failure: Abort multipart upload (no orphaned parts)
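A hedged sketch of the multipart path with abort-on-failure, using the standard boto3 multipart calls (names and part size are illustrative):

```python
def multipart_upload(s3, bucket: str, key: str, fileobj, part_size: int = 10 * 1024 * 1024) -> None:
    """Upload in parts; abort on any failure so no orphaned parts remain."""
    upload_id = s3.create_multipart_upload(Bucket=bucket, Key=key)["UploadId"]
    try:
        parts, number = [], 1
        while True:
            chunk = fileobj.read(part_size)
            if not chunk:
                break
            resp = s3.upload_part(
                Bucket=bucket, Key=key, PartNumber=number,
                UploadId=upload_id, Body=chunk,
            )
            parts.append({"PartNumber": number, "ETag": resp["ETag"]})
            number += 1
        s3.complete_multipart_upload(
            Bucket=bucket, Key=key, UploadId=upload_id,
            MultipartUpload={"Parts": parts},
        )
    except Exception:
        s3.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)
        raise
```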
|
||||
|
||||
---
|
||||
|
||||
## Reference Counting Lifecycle
|
||||
|
||||
### What Constitutes a "Reference"
|
||||
|
||||
A reference is a **Tag** pointing to an artifact. Each tag increments the ref_count by 1.
|
||||
|
||||
**Uploads do NOT directly increment ref_count** - only tag creation does.
|
||||
|
||||
### Lifecycle
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ CREATE: New artifact uploaded │
|
||||
│ - ref_count = 0 (no tags yet) │
|
||||
│ - Artifact exists but is "orphaned" │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ TAG CREATED: Tag points to artifact │
|
||||
│ - SQL trigger: ref_count += 1 │
|
||||
│ - Artifact is now referenced │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ TAG UPDATED: Tag moved to different artifact │
|
||||
│ - SQL trigger on old artifact: ref_count -= 1 │
|
||||
│ - SQL trigger on new artifact: ref_count += 1 │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ TAG DELETED: Tag removed │
|
||||
│ - SQL trigger: ref_count -= 1 │
|
||||
│ - If ref_count = 0, artifact is orphaned │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ GARBAGE COLLECTION: Clean up orphaned artifacts │
|
||||
│ - Triggered manually via admin endpoint │
|
||||
│ - Finds artifacts where ref_count = 0 │
|
||||
│ - Deletes from S3 and database │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### SQL Triggers
|
||||
|
||||
Three triggers manage ref_count automatically:
|
||||
|
||||
1. **`tags_ref_count_insert_trigger`**: On tag INSERT, increment target artifact's ref_count
|
||||
2. **`tags_ref_count_delete_trigger`**: On tag DELETE, decrement target artifact's ref_count
|
||||
3. **`tags_ref_count_update_trigger`**: On tag UPDATE (artifact_id changed), decrement old, increment new
|
||||
|
||||
### Garbage Collection
|
||||
|
||||
**Trigger**: Manual admin endpoint (`POST /api/v1/admin/garbage-collect`)
|
||||
|
||||
**Process**:
|
||||
1. Query artifacts where `ref_count = 0`
|
||||
2. For each orphan:
|
||||
- Delete from S3 (`DELETE fruits/xx/yy/hash`)
|
||||
- Delete from database
|
||||
- Log deletion
|
||||
|
||||
**Safety**:
|
||||
- Dry-run mode by default (`?dry_run=true`)
|
||||
- Limit per run (`?limit=100`)
|
||||
- Check constraint prevents ref_count < 0
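A sketch of the collection loop, assuming a SQLAlchemy-style session and the `Artifact` model from the data model section (names are assumptions; the real endpoint also reports what it deleted):

```python
def garbage_collect(session, s3, bucket: str, dry_run: bool = True, limit: int = 100) -> list[str]:
    """Delete up to `limit` orphaned artifacts; only report them when dry_run is set."""
    orphans = (
        session.query(Artifact)              # Artifact: assumed ORM model
        .filter(Artifact.ref_count == 0)
        .limit(limit)
        .all()
    )
    deleted = []
    for artifact in orphans:
        if not dry_run:
            s3.delete_object(Bucket=bucket, Key=artifact.s3_key)
            session.delete(artifact)
        deleted.append(artifact.id)
    if not dry_run:
        session.commit()
    return deleted
```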
|
||||
|
||||
---
|
||||
|
||||
## Edge Cases and Error Handling
|
||||
|
||||
### Empty Files
|
||||
|
||||
- **Behavior**: Rejected with HTTP 422
|
||||
- **Reason**: Empty content has deterministic hash but provides no value
|
||||
- **Error**: "Empty files are not allowed"
|
||||
|
||||
### Maximum File Size
|
||||
|
||||
- **Default Limit**: 10GB (`ORCHARD_MAX_FILE_SIZE`)
|
||||
- **Configurable**: Via environment variable
|
||||
- **Behavior**: Rejected with HTTP 413 before upload begins
|
||||
- **Error**: "File too large. Maximum size is 10GB"
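A minimal sketch of the size gate, assuming a FastAPI handler and a byte-count limit taken from `ORCHARD_MAX_FILE_SIZE` (the exact wiring in the real handler may differ):

```python
import os
from fastapi import HTTPException, Request

MAX_FILE_SIZE = int(os.environ.get("ORCHARD_MAX_FILE_SIZE", 10 * 1024**3))  # assumes a byte count

def check_upload_size(request: Request) -> None:
    """Reject oversized (413) and empty (422) uploads before streaming the body."""
    length = int(request.headers.get("content-length", 0))
    if length > MAX_FILE_SIZE:
        raise HTTPException(status_code=413, detail="File too large. Maximum size is 10GB")
    if length == 0:
        raise HTTPException(status_code=422, detail="Empty files are not allowed")
```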
|
||||
|
||||
### Concurrent Upload of Same Content
|
||||
|
||||
**Race Condition Scenario**: Two clients upload identical content simultaneously.
|
||||
|
||||
**Handling**:
|
||||
1. **S3 Level**: Both compute same hash, both check existence, both may upload
|
||||
2. **Database Level**: Row-level locking with `SELECT ... FOR UPDATE`
|
||||
3. **Outcome**: One creates artifact, other sees it exists, both succeed
|
||||
4. **Trigger Safety**: SQL triggers are atomic per row
|
||||
|
||||
**No Data Corruption**: Writes of identical content to the same key are idempotent; identical content = identical result.
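A sketch of the database-level guard (SQLAlchemy-style; the `Artifact` model and session handling are assumptions):

```python
def get_or_create_artifact(session, sha256_hex: str, size: int, s3_key: str):
    """Lock the existing row so concurrent updates serialize on the same hash."""
    artifact = (
        session.query(Artifact)              # Artifact: assumed ORM model
        .filter(Artifact.id == sha256_hex)
        .with_for_update()
        .one_or_none()
    )
    if artifact is None:
        # A concurrent INSERT of the same hash surfaces as a primary-key
        # violation, which the caller can treat as "already exists" and retry.
        artifact = Artifact(id=sha256_hex, size=size, ref_count=0, s3_key=s3_key)
        session.add(artifact)
    return artifact
```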
|
||||
|
||||
### Upload Interrupted
|
||||
|
||||
**Scenario**: Upload fails after hash computed but before S3 write completes.
|
||||
|
||||
**Simple Upload**:
|
||||
- S3 put_object is atomic - either completes or fails entirely
|
||||
- No cleanup needed
|
||||
|
||||
**Multipart Upload**:
|
||||
- On any failure, `abort_multipart_upload` is called
|
||||
- S3 cleans up partial parts
|
||||
- No orphaned data
|
||||
|
||||
### DB Exists but S3 Missing
|
||||
|
||||
**Detection**: Download request finds artifact in DB but S3 returns 404.
|
||||
|
||||
**Current Behavior**: Return 500 error to client.
|
||||
|
||||
**Recovery Options** (not yet implemented):
|
||||
1. Mark artifact for re-upload (set flag, notify admins)
|
||||
2. Decrement ref_count to trigger garbage collection
|
||||
3. Return specific error code for client retry
|
||||
|
||||
**Recommended**: Log critical alert, return 503 with retry hint.
|
||||
|
||||
### S3 Exists but DB Missing
|
||||
|
||||
**Detection**: Orphan - file in S3 with no corresponding DB record.
|
||||
|
||||
**Cause**:
|
||||
- Failed transaction after S3 upload
|
||||
- Manual S3 manipulation
|
||||
- Database restore from backup
|
||||
|
||||
**Recovery**:
|
||||
- Garbage collection won't delete (no DB record to query)
|
||||
- Requires S3 bucket scan + DB reconciliation
|
||||
- Manual admin task (out of scope for MVP)
|
||||
|
||||
### Network Timeout During Existence Check
|
||||
|
||||
**Behavior**: Retry up to 3 times with adaptive backoff.
|
||||
|
||||
**After Retries Exhausted**: Raise `S3ExistenceCheckError`, return 503 to client.
|
||||
|
||||
**Rationale**: Don't upload without knowing if duplicate exists (prevents orphans).
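These retry and timeout settings map directly onto the botocore client configuration; a sketch using the values from the configuration reference below (endpoint and credentials omitted):

```python
import boto3
from botocore.config import Config

s3 = boto3.client(
    "s3",
    config=Config(
        connect_timeout=10,              # ORCHARD_S3_CONNECT_TIMEOUT
        read_timeout=60,                 # ORCHARD_S3_READ_TIMEOUT
        retries={"max_attempts": 3,      # ORCHARD_S3_MAX_RETRIES
                 "mode": "adaptive"},    # adaptive backoff
    ),
)
```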
|
||||
|
||||
---
|
||||
|
||||
## Collision Handling
|
||||
|
||||
### SHA256 Collision Probability
|
||||
|
||||
For random inputs, the probability of collision is:
|
||||
|
||||
```
|
||||
P(collision) ≈ n² / 2^257
|
||||
|
||||
Where n = number of unique files
|
||||
```
|
||||
|
||||
| Files | Collision Probability |
|
||||
|-------|----------------------|
|
||||
| 10^9 (1 billion) | 10^-59 |
|
||||
| 10^12 (1 trillion) | 10^-53 |
|
||||
| 10^18 | 10^-41 |
|
||||
|
||||
**Practical Assessment**: By the estimate above, you would need on the order of 10^35 files before the collision probability even reaches one in a million; that is far beyond any realistic artifact store.
|
||||
|
||||
### Detection Mechanism
|
||||
|
||||
Despite near-zero probability, we detect potential collisions by:
|
||||
|
||||
1. **Size Comparison**: If hash matches but sizes differ, CRITICAL alert
|
||||
2. **ETag Verification**: S3 ETag provides secondary check
|
||||
|
||||
### Handling Procedure
|
||||
|
||||
If collision detected (size mismatch):
|
||||
|
||||
1. **Log CRITICAL alert** with full details
|
||||
2. **Reject upload** with 500 error
|
||||
3. **Do NOT overwrite** existing content
|
||||
4. **Notify operations** for manual investigation
|
||||
|
||||
```python
|
||||
raise HashCollisionError(
|
||||
f"Hash collision detected for {sha256_hash}: size mismatch"
|
||||
)
|
||||
```
|
||||
|
||||
### MVP Position
|
||||
|
||||
For MVP, we:
|
||||
- Detect collisions via size mismatch
|
||||
- Log and alert on detection
|
||||
- Reject conflicting upload
|
||||
- Accept that true collisions are practically impossible
|
||||
|
||||
No active mitigation (e.g., storing hash + size as composite key) is needed.
|
||||
|
||||
---
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
### Hash Computation Overhead
|
||||
|
||||
| File Size | Hash Time | Upload Time (100 Mbps) | Overhead |
|
||||
|-----------|-----------|------------------------|----------|
|
||||
| 10 MB | 25ms | 800ms | 3% |
|
||||
| 100 MB | 250ms | 8s | 3% |
|
||||
| 1 GB | 2.5s | 80s | 3% |
|
||||
| 10 GB | 25s | 800s | 3% |
|
||||
|
||||
**Conclusion**: Hash computation adds ~3% overhead regardless of file size. Network I/O dominates.
|
||||
|
||||
### Existence Check Overhead
|
||||
|
||||
- S3 HEAD request: ~50-100ms per call
|
||||
- Cached in future: Could use Redis/memory cache for hot paths
|
||||
- Current MVP: No caching (acceptable for expected load)
|
||||
|
||||
### Deduplication Savings
|
||||
|
||||
Example with 50% duplication rate:
|
||||
|
||||
| Metric | Without Dedup | With Dedup | Savings |
|
||||
|--------|---------------|------------|---------|
|
||||
| Storage (100K files, 10MB avg) | 1 TB | 500 GB | 50% |
|
||||
| Upload bandwidth | 1 TB | 500 GB | 50% |
|
||||
| S3 costs | $23/mo | $11.50/mo | 50% |
|
||||
|
||||
---
|
||||
|
||||
## Operations Runbook
|
||||
|
||||
### Monitoring Deduplication
|
||||
|
||||
```bash
|
||||
# View deduplication stats
|
||||
curl http://orchard:8080/api/v1/stats/deduplication
|
||||
|
||||
# Response includes:
|
||||
# - deduplication_ratio
|
||||
# - total_uploads, deduplicated_uploads
|
||||
# - bytes_saved
|
||||
```
|
||||
|
||||
### Checking for Orphaned Artifacts
|
||||
|
||||
```bash
|
||||
# List orphaned artifacts (ref_count = 0)
|
||||
curl http://orchard:8080/api/v1/admin/orphaned-artifacts
|
||||
|
||||
# Dry-run garbage collection
|
||||
curl -X POST "http://orchard:8080/api/v1/admin/garbage-collect?dry_run=true"
|
||||
|
||||
# Execute garbage collection
|
||||
curl -X POST "http://orchard:8080/api/v1/admin/garbage-collect?dry_run=false"
|
||||
```
|
||||
|
||||
### Verifying Artifact Integrity
|
||||
|
||||
```bash
|
||||
# Download and verify hash matches artifact ID
|
||||
ARTIFACT_ID="dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f"
|
||||
curl -O http://orchard:8080/api/v1/artifact/$ARTIFACT_ID/download
|
||||
COMPUTED=$(sha256sum downloaded_file | cut -d' ' -f1)
|
||||
[ "$ARTIFACT_ID" = "$COMPUTED" ] && echo "OK" || echo "INTEGRITY FAILURE"
|
||||
```
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
| Symptom | Likely Cause | Resolution |
|
||||
|---------|--------------|------------|
|
||||
| "Hash computation error" | Empty file or read error | Check file content, retry |
|
||||
| "Storage unavailable" | S3/MinIO down | Check S3 health, retry |
|
||||
| "File too large" | Exceeds max_file_size | Adjust config or use chunked upload |
|
||||
| "Hash collision detected" | Extremely rare | Investigate, do not ignore |
|
||||
| Orphaned artifacts accumulating | Tags deleted, no GC run | Run garbage collection |
|
||||
| Download returns 404 | S3 object missing | Check S3 bucket, restore from backup |
|
||||
|
||||
### Configuration Reference
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `ORCHARD_MAX_FILE_SIZE` | 10GB | Maximum upload size |
|
||||
| `ORCHARD_MIN_FILE_SIZE` | 1 | Minimum upload size (rejects empty) |
|
||||
| `ORCHARD_S3_MAX_RETRIES` | 3 | Retry attempts for S3 operations |
|
||||
| `ORCHARD_S3_CONNECT_TIMEOUT` | 10s | S3 connection timeout |
|
||||
| `ORCHARD_S3_READ_TIMEOUT` | 60s | S3 read timeout |
|
||||
|
||||
---
|
||||
|
||||
## Appendix: Decision Records
|
||||
|
||||
### ADR-001: SHA256 for Content Hashing
|
||||
|
||||
**Status**: Accepted
|
||||
|
||||
**Context**: Need deterministic content identifier for deduplication.
|
||||
|
||||
**Decision**: Use SHA256.
|
||||
|
||||
**Rationale**:
|
||||
- Cryptographically strong (no known attacks)
|
||||
- Universal adoption (Git, Docker, npm)
|
||||
- Sufficient speed for I/O-bound workloads
|
||||
- Excellent tooling
|
||||
|
||||
**Consequences**:
|
||||
- 64-character artifact IDs (longer than UUIDs)
|
||||
- CPU overhead ~3% of upload time
|
||||
- Future algorithm migration requires versioning
|
||||
|
||||
### ADR-002: Whole-File Deduplication Only
|
||||
|
||||
**Status**: Accepted
|
||||
|
||||
**Context**: Could implement chunk-level deduplication for better savings.
|
||||
|
||||
**Decision**: Whole-file only for MVP.
|
||||
|
||||
**Rationale**:
|
||||
- Simpler implementation
|
||||
- No chunking algorithm complexity
|
||||
- Sufficient for build artifact use case
|
||||
- Can add chunk-level later if needed
|
||||
|
||||
**Consequences**:
|
||||
- Files with partial overlap stored entirely
|
||||
- Large files with small changes not deduplicated
|
||||
- Acceptable for binary artifact workloads
|
||||
|
||||
### ADR-003: SQL Triggers for ref_count
|
||||
|
||||
**Status**: Accepted
|
||||
|
||||
**Context**: ref_count must be accurate for garbage collection.
|
||||
|
||||
**Decision**: Use PostgreSQL triggers, not application code.
|
||||
|
||||
**Rationale**:
|
||||
- Atomic with tag operations
|
||||
- Cannot be bypassed
|
||||
- Works regardless of client (API, direct SQL, migrations)
|
||||
- Simpler application code
|
||||
|
||||
**Consequences**:
|
||||
- Trigger logic in SQL (less visible)
|
||||
- Must maintain triggers across schema changes
|
||||
- Debugging requires database access
|
||||
@@ -3,12 +3,14 @@ import Layout from './components/Layout';
|
||||
import Home from './pages/Home';
|
||||
import ProjectPage from './pages/ProjectPage';
|
||||
import PackagePage from './pages/PackagePage';
|
||||
import Dashboard from './pages/Dashboard';
|
||||
|
||||
function App() {
|
||||
return (
|
||||
<Layout>
|
||||
<Routes>
|
||||
<Route path="/" element={<Home />} />
|
||||
<Route path="/dashboard" element={<Dashboard />} />
|
||||
<Route path="/project/:projectName" element={<ProjectPage />} />
|
||||
<Route path="/project/:projectName/:packageName" element={<PackagePage />} />
|
||||
</Routes>
|
||||
|
||||
@@ -13,6 +13,10 @@ import {
|
||||
ArtifactListParams,
|
||||
ProjectListParams,
|
||||
GlobalSearchResponse,
|
||||
Stats,
|
||||
DeduplicationStats,
|
||||
TimelineStats,
|
||||
CrossProjectStats,
|
||||
} from './types';
|
||||
|
||||
const API_BASE = '/api/v1';
|
||||
@@ -156,3 +160,29 @@ export async function uploadArtifact(projectName: string, packageName: string, f
|
||||
export function getDownloadUrl(projectName: string, packageName: string, ref: string): string {
|
||||
return `${API_BASE}/project/${projectName}/${packageName}/+/${ref}`;
|
||||
}
|
||||
|
||||
// Stats API
|
||||
export async function getStats(): Promise<Stats> {
|
||||
const response = await fetch(`${API_BASE}/stats`);
|
||||
return handleResponse<Stats>(response);
|
||||
}
|
||||
|
||||
export async function getDeduplicationStats(): Promise<DeduplicationStats> {
|
||||
const response = await fetch(`${API_BASE}/stats/deduplication`);
|
||||
return handleResponse<DeduplicationStats>(response);
|
||||
}
|
||||
|
||||
export async function getTimelineStats(
|
||||
  period: 'daily' | 'weekly' | 'monthly' = 'daily',
|
||||
fromDate?: string,
|
||||
toDate?: string
|
||||
): Promise<TimelineStats> {
|
||||
const params = buildQueryString({ period, from_date: fromDate, to_date: toDate });
|
||||
const response = await fetch(`${API_BASE}/stats/timeline${params}`);
|
||||
return handleResponse<TimelineStats>(response);
|
||||
}
|
||||
|
||||
export async function getCrossProjectStats(): Promise<CrossProjectStats> {
|
||||
const response = await fetch(`${API_BASE}/stats/cross-project`);
|
||||
return handleResponse<CrossProjectStats>(response);
|
||||
}
|
||||
|
||||
@@ -42,6 +42,15 @@ function Layout({ children }: LayoutProps) {
|
||||
</svg>
|
||||
Projects
|
||||
</Link>
|
||||
<Link to="/dashboard" className={location.pathname === '/dashboard' ? 'active' : ''}>
|
||||
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
||||
<rect x="3" y="3" width="7" height="7" rx="1"/>
|
||||
<rect x="14" y="3" width="7" height="7" rx="1"/>
|
||||
<rect x="3" y="14" width="7" height="7" rx="1"/>
|
||||
<rect x="14" y="14" width="7" height="7" rx="1"/>
|
||||
</svg>
|
||||
Dashboard
|
||||
</Link>
|
||||
<a href="/docs" className="nav-link-muted">
|
||||
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"/>
|
||||
|
||||
547
frontend/src/pages/Dashboard.css
Normal file
547
frontend/src/pages/Dashboard.css
Normal file
@@ -0,0 +1,547 @@
|
||||
.dashboard {
|
||||
max-width: 1200px;
|
||||
margin: 0 auto;
|
||||
}
|
||||
|
||||
.dashboard__header {
|
||||
position: relative;
|
||||
margin-bottom: 48px;
|
||||
padding-bottom: 32px;
|
||||
border-bottom: 1px solid var(--border-primary);
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.dashboard__header-content {
|
||||
position: relative;
|
||||
z-index: 1;
|
||||
}
|
||||
|
||||
.dashboard__header h1 {
|
||||
font-size: 2.5rem;
|
||||
font-weight: 700;
|
||||
color: var(--text-primary);
|
||||
letter-spacing: -0.03em;
|
||||
margin-bottom: 8px;
|
||||
background: linear-gradient(135deg, var(--text-primary) 0%, var(--accent-primary) 100%);
|
||||
-webkit-background-clip: text;
|
||||
-webkit-text-fill-color: transparent;
|
||||
background-clip: text;
|
||||
}
|
||||
|
||||
.dashboard__subtitle {
|
||||
font-size: 1rem;
|
||||
color: var(--text-tertiary);
|
||||
letter-spacing: -0.01em;
|
||||
}
|
||||
|
||||
.dashboard__header-accent {
|
||||
position: absolute;
|
||||
top: -100px;
|
||||
right: -100px;
|
||||
width: 400px;
|
||||
height: 400px;
|
||||
background: radial-gradient(circle, rgba(16, 185, 129, 0.08) 0%, transparent 70%);
|
||||
pointer-events: none;
|
||||
}
|
||||
|
||||
.dashboard__section {
|
||||
margin-bottom: 48px;
|
||||
}
|
||||
|
||||
.dashboard__section-title {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 12px;
|
||||
font-size: 1.125rem;
|
||||
font-weight: 600;
|
||||
color: var(--text-primary);
|
||||
margin-bottom: 20px;
|
||||
letter-spacing: -0.01em;
|
||||
}
|
||||
|
||||
.dashboard__section-title svg {
|
||||
color: var(--accent-primary);
|
||||
}
|
||||
|
||||
.dashboard__section-description {
|
||||
color: var(--text-tertiary);
|
||||
font-size: 0.875rem;
|
||||
margin-bottom: 20px;
|
||||
margin-top: -8px;
|
||||
}
|
||||
|
||||
.stat-grid {
|
||||
display: grid;
|
||||
gap: 16px;
|
||||
}
|
||||
|
||||
.stat-grid--4 {
|
||||
grid-template-columns: repeat(4, 1fr);
|
||||
}
|
||||
|
||||
.stat-grid--3 {
|
||||
grid-template-columns: repeat(3, 1fr);
|
||||
}
|
||||
|
||||
.stat-grid--2 {
|
||||
grid-template-columns: repeat(2, 1fr);
|
||||
}
|
||||
|
||||
@media (max-width: 1024px) {
|
||||
.stat-grid--4 {
|
||||
grid-template-columns: repeat(2, 1fr);
|
||||
}
|
||||
}
|
||||
|
||||
@media (max-width: 640px) {
|
||||
.stat-grid--4,
|
||||
.stat-grid--3,
|
||||
.stat-grid--2 {
|
||||
grid-template-columns: 1fr;
|
||||
}
|
||||
}
|
||||
|
||||
.stat-card {
|
||||
position: relative;
|
||||
display: flex;
|
||||
align-items: flex-start;
|
||||
gap: 16px;
|
||||
background: var(--bg-secondary);
|
||||
border: 1px solid var(--border-primary);
|
||||
border-radius: var(--radius-lg);
|
||||
padding: 20px;
|
||||
transition: all var(--transition-normal);
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.stat-card::before {
|
||||
content: '';
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
right: 0;
|
||||
height: 3px;
|
||||
background: var(--border-secondary);
|
||||
transition: background var(--transition-normal);
|
||||
}
|
||||
|
||||
.stat-card:hover {
|
||||
border-color: var(--border-secondary);
|
||||
transform: translateY(-2px);
|
||||
box-shadow: var(--shadow-lg);
|
||||
}
|
||||
|
||||
.stat-card--success::before {
|
||||
background: var(--accent-gradient);
|
||||
}
|
||||
|
||||
.stat-card--success {
|
||||
background: linear-gradient(135deg, rgba(16, 185, 129, 0.03) 0%, transparent 50%);
|
||||
}
|
||||
|
||||
.stat-card--accent::before {
|
||||
background: linear-gradient(135deg, #3b82f6 0%, #8b5cf6 100%);
|
||||
}
|
||||
|
||||
.stat-card--accent {
|
||||
background: linear-gradient(135deg, rgba(59, 130, 246, 0.03) 0%, transparent 50%);
|
||||
}
|
||||
|
||||
.stat-card__icon {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
width: 48px;
|
||||
height: 48px;
|
||||
border-radius: var(--radius-md);
|
||||
background: var(--bg-tertiary);
|
||||
color: var(--text-tertiary);
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.stat-card--success .stat-card__icon {
|
||||
background: rgba(16, 185, 129, 0.1);
|
||||
color: var(--accent-primary);
|
||||
}
|
||||
|
||||
.stat-card--accent .stat-card__icon {
|
||||
background: rgba(59, 130, 246, 0.1);
|
||||
color: #3b82f6;
|
||||
}
|
||||
|
||||
.stat-card__content {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 2px;
|
||||
min-width: 0;
|
||||
}
|
||||
|
||||
.stat-card__label {
|
||||
font-size: 0.75rem;
|
||||
font-weight: 500;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.05em;
|
||||
color: var(--text-tertiary);
|
||||
}
|
||||
|
||||
.stat-card__value {
|
||||
font-size: 1.75rem;
|
||||
font-weight: 700;
|
||||
color: var(--text-primary);
|
||||
letter-spacing: -0.02em;
|
||||
line-height: 1.2;
|
||||
display: flex;
|
||||
align-items: baseline;
|
||||
gap: 8px;
|
||||
}
|
||||
|
||||
.stat-card__subvalue {
|
||||
font-size: 0.75rem;
|
||||
color: var(--text-muted);
|
||||
margin-top: 2px;
|
||||
}
|
||||
|
||||
.stat-card__trend {
|
||||
font-size: 0.875rem;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.stat-card__trend--up {
|
||||
color: var(--success);
|
||||
}
|
||||
|
||||
.stat-card__trend--down {
|
||||
color: var(--error);
|
||||
}
|
||||
|
||||
.progress-bar {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.progress-bar__header {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
margin-bottom: 8px;
|
||||
}
|
||||
|
||||
.progress-bar__label {
|
||||
font-size: 0.8125rem;
|
||||
color: var(--text-secondary);
|
||||
}
|
||||
|
||||
.progress-bar__percentage {
|
||||
font-size: 0.8125rem;
|
||||
font-weight: 600;
|
||||
color: var(--text-primary);
|
||||
}
|
||||
|
||||
.progress-bar__track {
|
||||
position: relative;
|
||||
height: 8px;
|
||||
background: var(--bg-tertiary);
|
||||
border-radius: 100px;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.progress-bar__fill {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
height: 100%;
|
||||
background: var(--border-secondary);
|
||||
border-radius: 100px;
|
||||
transition: width 0.5s ease-out;
|
||||
}
|
||||
|
||||
.progress-bar__glow {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
left: 0;
|
||||
height: 100%;
|
||||
background: transparent;
|
||||
border-radius: 100px;
|
||||
transition: width 0.5s ease-out;
|
||||
}
|
||||
|
||||
.progress-bar--success .progress-bar__fill {
|
||||
background: var(--accent-gradient);
|
||||
}
|
||||
|
||||
.progress-bar--success .progress-bar__glow {
|
||||
box-shadow: 0 0 12px rgba(16, 185, 129, 0.4);
|
||||
}
|
||||
|
||||
.progress-bar--accent .progress-bar__fill {
|
||||
background: linear-gradient(135deg, #3b82f6 0%, #8b5cf6 100%);
|
||||
}
|
||||
|
||||
.effectiveness-grid {
|
||||
display: grid;
|
||||
grid-template-columns: 1.5fr 1fr;
|
||||
gap: 16px;
|
||||
}
|
||||
|
||||
@media (max-width: 900px) {
|
||||
.effectiveness-grid {
|
||||
grid-template-columns: 1fr;
|
||||
}
|
||||
}
|
||||
|
||||
.effectiveness-card {
|
||||
background: var(--bg-secondary);
|
||||
border: 1px solid var(--border-primary);
|
||||
border-radius: var(--radius-lg);
|
||||
padding: 24px;
|
||||
}
|
||||
|
||||
.effectiveness-card h3 {
|
||||
font-size: 0.875rem;
|
||||
font-weight: 600;
|
||||
color: var(--text-primary);
|
||||
margin-bottom: 24px;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.05em;
|
||||
}
|
||||
|
||||
.storage-comparison {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 20px;
|
||||
margin-bottom: 24px;
|
||||
}
|
||||
|
||||
.storage-bar__label {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
margin-bottom: 8px;
|
||||
font-size: 0.8125rem;
|
||||
color: var(--text-secondary);
|
||||
}
|
||||
|
||||
.storage-bar__value {
|
||||
font-weight: 600;
|
  color: var(--text-primary);
  font-family: 'JetBrains Mono', 'Fira Code', monospace;
}

.storage-savings {
  display: flex;
  align-items: center;
  gap: 16px;
  padding: 20px;
  background: linear-gradient(135deg, rgba(16, 185, 129, 0.08) 0%, rgba(5, 150, 105, 0.04) 100%);
  border: 1px solid rgba(16, 185, 129, 0.2);
  border-radius: var(--radius-md);
}

.storage-savings__icon {
  display: flex;
  align-items: center;
  justify-content: center;
  width: 56px;
  height: 56px;
  border-radius: 50%;
  background: var(--accent-gradient);
  color: white;
  flex-shrink: 0;
  box-shadow: 0 0 24px rgba(16, 185, 129, 0.3);
}

.storage-savings__content {
  display: flex;
  flex-direction: column;
}

.storage-savings__value {
  font-size: 1.5rem;
  font-weight: 700;
  color: var(--accent-primary);
  letter-spacing: -0.02em;
}

.storage-savings__label {
  font-size: 0.8125rem;
  color: var(--text-tertiary);
}

.dedup-rate {
  display: flex;
  flex-direction: column;
  align-items: center;
  gap: 24px;
}

.dedup-rate__circle {
  position: relative;
  width: 160px;
  height: 160px;
}

.dedup-rate__svg {
  width: 100%;
  height: 100%;
  transform: rotate(0deg);
}

.dedup-rate__value {
  position: absolute;
  top: 50%;
  left: 50%;
  transform: translate(-50%, -50%);
  display: flex;
  align-items: baseline;
  gap: 2px;
}

.dedup-rate__number {
  font-size: 2.5rem;
  font-weight: 700;
  color: var(--text-primary);
  letter-spacing: -0.03em;
}

.dedup-rate__symbol {
  font-size: 1.25rem;
  font-weight: 600;
  color: var(--text-tertiary);
}

.dedup-rate__details {
  display: flex;
  gap: 32px;
}

.dedup-rate__detail {
  display: flex;
  flex-direction: column;
  align-items: center;
  text-align: center;
}

.dedup-rate__detail-value {
  font-size: 1.25rem;
  font-weight: 700;
  color: var(--text-primary);
}

.dedup-rate__detail-label {
  font-size: 0.6875rem;
  color: var(--text-muted);
  text-transform: uppercase;
  letter-spacing: 0.05em;
  margin-top: 4px;
}

.artifacts-table {
  margin-top: 16px;
}

.artifact-link {
  display: inline-block;
}

.artifact-link code {
  font-family: 'JetBrains Mono', 'Fira Code', monospace;
  font-size: 0.8125rem;
  padding: 4px 8px;
  background: var(--bg-tertiary);
  border-radius: var(--radius-sm);
  color: var(--accent-primary);
  transition: all var(--transition-fast);
}

.artifact-link:hover code {
  background: rgba(16, 185, 129, 0.15);
}

.artifact-name {
  max-width: 200px;
  overflow: hidden;
  text-overflow: ellipsis;
  white-space: nowrap;
  display: block;
  color: var(--text-secondary);
}

.ref-count {
  display: inline-flex;
  align-items: baseline;
  gap: 4px;
}

.ref-count__value {
  font-weight: 600;
  color: var(--text-primary);
  font-size: 1rem;
}

.ref-count__label {
  font-size: 0.6875rem;
  color: var(--text-muted);
  text-transform: uppercase;
}

.storage-saved {
  color: var(--success);
  font-weight: 600;
}

.dashboard__loading {
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  gap: 16px;
  padding: 80px 32px;
  color: var(--text-tertiary);
}

.dashboard__loading-spinner {
  width: 40px;
  height: 40px;
  border: 3px solid var(--border-primary);
  border-top-color: var(--accent-primary);
  border-radius: 50%;
  animation: spin 1s linear infinite;
}

@keyframes spin {
  to {
    transform: rotate(360deg);
  }
}

.dashboard__error {
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  gap: 16px;
  padding: 80px 32px;
  text-align: center;
  background: var(--bg-secondary);
  border: 1px solid var(--border-primary);
  border-radius: var(--radius-lg);
}

.dashboard__error svg {
  color: var(--error);
  opacity: 0.5;
}

.dashboard__error h3 {
  font-size: 1.25rem;
  font-weight: 600;
  color: var(--text-primary);
}

.dashboard__error p {
  color: var(--text-tertiary);
  max-width: 400px;
}

.dashboard__error .btn {
  margin-top: 8px;
}
436
frontend/src/pages/Dashboard.tsx
Normal file
@@ -0,0 +1,436 @@
import { useState, useEffect } from 'react';
import { Link } from 'react-router-dom';
import { Stats, DeduplicationStats, ReferencedArtifact } from '../types';
import { getStats, getDeduplicationStats } from '../api';
import { DataTable } from '../components/DataTable';
import './Dashboard.css';

function formatBytes(bytes: number): string {
  if (bytes === 0) return '0 B';
  const k = 1024;
  const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
  const i = Math.floor(Math.log(bytes) / Math.log(k));
  return `${parseFloat((bytes / Math.pow(k, i)).toFixed(2))} ${sizes[i]}`;
}

function formatNumber(num: number): string {
  return num.toLocaleString();
}

function truncateHash(hash: string, length: number = 12): string {
  if (hash.length <= length) return hash;
  return `${hash.slice(0, length)}...`;
}

interface StatCardProps {
  label: string;
  value: string;
  subvalue?: string;
  icon: React.ReactNode;
  variant?: 'default' | 'success' | 'accent';
  trend?: 'up' | 'down' | 'neutral';
}

function StatCard({ label, value, subvalue, icon, variant = 'default', trend }: StatCardProps) {
  return (
    <div className={`stat-card stat-card--${variant}`}>
      <div className="stat-card__icon">{icon}</div>
      <div className="stat-card__content">
        <span className="stat-card__label">{label}</span>
        <span className="stat-card__value">
          {value}
          {trend && (
            <span className={`stat-card__trend stat-card__trend--${trend}`}>
              {trend === 'up' && '↑'}
              {trend === 'down' && '↓'}
            </span>
          )}
        </span>
        {subvalue && <span className="stat-card__subvalue">{subvalue}</span>}
      </div>
    </div>
  );
}

interface ProgressBarProps {
  value: number;
  max: number;
  label?: string;
  showPercentage?: boolean;
  variant?: 'default' | 'success' | 'accent';
}

function ProgressBar({ value, max, label, showPercentage = true, variant = 'default' }: ProgressBarProps) {
  const percentage = max > 0 ? Math.min((value / max) * 100, 100) : 0;

  return (
    <div className={`progress-bar progress-bar--${variant}`}>
      {label && (
        <div className="progress-bar__header">
          <span className="progress-bar__label">{label}</span>
          {showPercentage && <span className="progress-bar__percentage">{percentage.toFixed(1)}%</span>}
        </div>
      )}
      <div className="progress-bar__track">
        <div
          className="progress-bar__fill"
          style={{ width: `${percentage}%` }}
        />
        <div className="progress-bar__glow" style={{ width: `${percentage}%` }} />
      </div>
    </div>
  );
}

function Dashboard() {
  const [stats, setStats] = useState<Stats | null>(null);
  const [dedupStats, setDedupStats] = useState<DeduplicationStats | null>(null);
  const [loading, setLoading] = useState(true);
  const [error, setError] = useState<string | null>(null);

  useEffect(() => {
    async function loadStats() {
      try {
        setLoading(true);
        const [statsData, dedupData] = await Promise.all([
          getStats(),
          getDeduplicationStats(),
        ]);
        setStats(statsData);
        setDedupStats(dedupData);
        setError(null);
      } catch (err) {
        setError(err instanceof Error ? err.message : 'Failed to load statistics');
      } finally {
        setLoading(false);
      }
    }
    loadStats();
  }, []);

  if (loading) {
    return (
      <div className="dashboard">
        <div className="dashboard__loading">
          <div className="dashboard__loading-spinner" />
          <span>Loading statistics...</span>
        </div>
      </div>
    );
  }

  if (error) {
    return (
      <div className="dashboard">
        <div className="dashboard__error">
          <svg width="48" height="48" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="1.5">
            <circle cx="12" cy="12" r="10"/>
            <line x1="12" y1="8" x2="12" y2="12"/>
            <line x1="12" y1="16" x2="12.01" y2="16"/>
          </svg>
          <h3>Unable to load dashboard</h3>
          <p>{error}</p>
          <button className="btn btn-primary" onClick={() => window.location.reload()}>
            Try Again
          </button>
        </div>
      </div>
    );
  }

  const artifactColumns = [
    {
      key: 'artifact_id',
      header: 'Artifact ID',
      render: (item: ReferencedArtifact) => (
        <Link to={`/artifact/${item.artifact_id}`} className="artifact-link">
          <code>{truncateHash(item.artifact_id, 16)}</code>
        </Link>
      ),
    },
    {
      key: 'original_name',
      header: 'Name',
      render: (item: ReferencedArtifact) => (
        <span className="artifact-name" title={item.original_name || 'Unknown'}>
          {item.original_name || '—'}
        </span>
      ),
    },
    {
      key: 'size',
      header: 'Size',
      render: (item: ReferencedArtifact) => formatBytes(item.size),
    },
    {
      key: 'ref_count',
      header: 'References',
      render: (item: ReferencedArtifact) => (
        <span className="ref-count">
          <span className="ref-count__value">{formatNumber(item.ref_count)}</span>
          <span className="ref-count__label">refs</span>
        </span>
      ),
    },
    {
      key: 'storage_saved',
      header: 'Storage Saved',
      render: (item: ReferencedArtifact) => (
        <span className="storage-saved">
          {formatBytes(item.storage_saved)}
        </span>
      ),
    },
  ];

  return (
    <div className="dashboard">
      <header className="dashboard__header">
        <div className="dashboard__header-content">
          <h1>Storage Dashboard</h1>
          <p className="dashboard__subtitle">Real-time deduplication and storage analytics</p>
        </div>
        <div className="dashboard__header-accent" />
      </header>

      <section className="dashboard__section">
        <h2 className="dashboard__section-title">
          <svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
            <path d="M21 16V8a2 2 0 0 0-1-1.73l-7-4a2 2 0 0 0-2 0l-7 4A2 2 0 0 0 3 8v8a2 2 0 0 0 1 1.73l7 4a2 2 0 0 0 2 0l7-4A2 2 0 0 0 21 16z"/>
          </svg>
          Storage Overview
        </h2>
        <div className="stat-grid stat-grid--4">
          <StatCard
            label="Total Storage Used"
            value={formatBytes(stats?.total_size_bytes || 0)}
            icon={
              <svg width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
                <path d="M22 12h-4l-3 9L9 3l-3 9H2"/>
              </svg>
            }
            variant="default"
          />
          <StatCard
            label="Storage Saved"
            value={formatBytes(stats?.storage_saved_bytes || 0)}
            subvalue="through deduplication"
            icon={
              <svg width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
                <polyline points="23 6 13.5 15.5 8.5 10.5 1 18"/>
                <polyline points="17 6 23 6 23 12"/>
              </svg>
            }
            variant="success"
          />
          <StatCard
            label="Deduplication Ratio"
            value={`${(stats?.deduplication_ratio || 1).toFixed(2)}x`}
            subvalue="compression achieved"
            icon={
              <svg width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
                <rect x="3" y="3" width="18" height="18" rx="2" ry="2"/>
                <line x1="3" y1="9" x2="21" y2="9"/>
                <line x1="9" y1="21" x2="9" y2="9"/>
              </svg>
            }
            variant="accent"
          />
          <StatCard
            label="Savings Percentage"
            value={`${(dedupStats?.savings_percentage || 0).toFixed(1)}%`}
            subvalue="of logical storage"
            icon={
              <svg width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
                <circle cx="12" cy="12" r="10"/>
                <polyline points="12 6 12 12 16 14"/>
              </svg>
            }
            variant="success"
          />
        </div>
      </section>

      <section className="dashboard__section">
        <h2 className="dashboard__section-title">
          <svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
            <path d="M12 20V10"/>
            <path d="M18 20V4"/>
            <path d="M6 20v-4"/>
          </svg>
          Artifact Statistics
        </h2>
        <div className="stat-grid stat-grid--4">
          <StatCard
            label="Total Artifacts"
            value={formatNumber(stats?.total_artifacts || 0)}
            icon={
              <svg width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
                <path d="M14.5 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V7.5L14.5 2z"/>
                <polyline points="14 2 14 8 20 8"/>
              </svg>
            }
          />
          <StatCard
            label="Total Uploads"
            value={formatNumber(stats?.total_uploads || 0)}
            icon={
              <svg width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
                <path d="M21 15v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4"/>
                <polyline points="17 8 12 3 7 8"/>
                <line x1="12" y1="3" x2="12" y2="15"/>
              </svg>
            }
          />
          <StatCard
            label="Deduplicated Uploads"
            value={formatNumber(stats?.deduplicated_uploads || 0)}
            subvalue="uploads reusing existing"
            icon={
              <svg width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
                <rect x="9" y="9" width="13" height="13" rx="2" ry="2"/>
                <path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"/>
              </svg>
            }
            variant="success"
          />
          <StatCard
            label="Unique Artifacts"
            value={formatNumber(stats?.unique_artifacts || 0)}
            subvalue="distinct content hashes"
            icon={
              <svg width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
                <polygon points="12 2 15.09 8.26 22 9.27 17 14.14 18.18 21.02 12 17.77 5.82 21.02 7 14.14 2 9.27 8.91 8.26 12 2"/>
              </svg>
            }
          />
        </div>
      </section>

      <section className="dashboard__section">
        <h2 className="dashboard__section-title">
          <svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
            <line x1="12" y1="20" x2="12" y2="10"/>
            <line x1="18" y1="20" x2="18" y2="4"/>
            <line x1="6" y1="20" x2="6" y2="16"/>
          </svg>
          Deduplication Effectiveness
        </h2>
        <div className="effectiveness-grid">
          <div className="effectiveness-card">
            <h3>Logical vs Physical Storage</h3>
            <div className="storage-comparison">
              <div className="storage-bar">
                <div className="storage-bar__label">
                  <span>Logical (with duplicates)</span>
                  <span className="storage-bar__value">{formatBytes(dedupStats?.total_logical_bytes || 0)}</span>
                </div>
                <ProgressBar
                  value={dedupStats?.total_logical_bytes || 0}
                  max={dedupStats?.total_logical_bytes || 1}
                  showPercentage={false}
                  variant="default"
                />
              </div>
              <div className="storage-bar">
                <div className="storage-bar__label">
                  <span>Physical (actual storage)</span>
                  <span className="storage-bar__value">{formatBytes(dedupStats?.total_physical_bytes || 0)}</span>
                </div>
                <ProgressBar
                  value={dedupStats?.total_physical_bytes || 0}
                  max={dedupStats?.total_logical_bytes || 1}
                  showPercentage={false}
                  variant="success"
                />
              </div>
            </div>
            <div className="storage-savings">
              <div className="storage-savings__icon">
                <svg width="32" height="32" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
                  <polyline points="20 6 9 17 4 12"/>
                </svg>
              </div>
              <div className="storage-savings__content">
                <span className="storage-savings__value">{formatBytes(dedupStats?.bytes_saved || 0)}</span>
                <span className="storage-savings__label">saved through deduplication</span>
              </div>
            </div>
          </div>

          <div className="effectiveness-card">
            <h3>Deduplication Rate</h3>
            <div className="dedup-rate">
              <div className="dedup-rate__circle">
                <svg viewBox="0 0 100 100" className="dedup-rate__svg">
                  <circle
                    cx="50"
                    cy="50"
                    r="45"
                    fill="none"
                    stroke="var(--border-primary)"
                    strokeWidth="8"
                  />
                  <circle
                    cx="50"
                    cy="50"
                    r="45"
                    fill="none"
                    stroke="url(#gradient)"
                    strokeWidth="8"
                    strokeLinecap="round"
                    strokeDasharray={`${(dedupStats?.savings_percentage || 0) * 2.827} 282.7`}
                    transform="rotate(-90 50 50)"
                  />
                  <defs>
                    <linearGradient id="gradient" x1="0%" y1="0%" x2="100%" y2="0%">
                      <stop offset="0%" stopColor="#10b981" />
                      <stop offset="100%" stopColor="#059669" />
                    </linearGradient>
                  </defs>
                </svg>
                <div className="dedup-rate__value">
                  <span className="dedup-rate__number">{(dedupStats?.savings_percentage || 0).toFixed(1)}</span>
                  <span className="dedup-rate__symbol">%</span>
                </div>
              </div>
              <div className="dedup-rate__details">
                <div className="dedup-rate__detail">
                  <span className="dedup-rate__detail-value">{(stats?.deduplication_ratio || 1).toFixed(2)}x</span>
                  <span className="dedup-rate__detail-label">Compression Ratio</span>
                </div>
                <div className="dedup-rate__detail">
                  <span className="dedup-rate__detail-value">{formatNumber(stats?.deduplicated_uploads || 0)}</span>
                  <span className="dedup-rate__detail-label">Duplicate Uploads</span>
                </div>
              </div>
            </div>
          </div>
        </div>
      </section>

      {dedupStats && dedupStats.most_referenced_artifacts.length > 0 && (
        <section className="dashboard__section">
          <h2 className="dashboard__section-title">
            <svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
              <polygon points="12 2 15.09 8.26 22 9.27 17 14.14 18.18 21.02 12 17.77 5.82 21.02 7 14.14 2 9.27 8.91 8.26 12 2"/>
            </svg>
            Top Referenced Artifacts
          </h2>
          <p className="dashboard__section-description">
            These artifacts are referenced most frequently across your storage, maximizing deduplication savings.
          </p>
          <DataTable
            data={dedupStats.most_referenced_artifacts.slice(0, 10)}
            columns={artifactColumns}
            keyExtractor={(item) => item.artifact_id}
            emptyMessage="No referenced artifacts found"
            className="artifacts-table"
          />
        </section>
      )}
    </div>
  );
}

export default Dashboard;
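Note on the dedup-rate ring above: the progress circle uses r="45" inside a 100×100 viewBox, so its circumference is 2 × π × 45 ≈ 282.7. Multiplying the savings percentage by 2.827 (282.7 / 100) converts a 0–100 value into the visible arc length for strokeDasharray, and transform="rotate(-90 50 50)" starts the arc at 12 o'clock. A minimal TypeScript sketch of the same arithmetic (illustrative only, not part of this commit):

// Illustrative sketch of the arc math behind the dedup-rate ring; the names
// RADIUS, CIRCUMFERENCE, and dashArrayFor are hypothetical, not commit code.
const RADIUS = 45;
const CIRCUMFERENCE = 2 * Math.PI * RADIUS; // ≈ 282.74, the "282.7" in the JSX

// Convert a 0–100 percentage into the "visibleArc totalCircumference" dash pattern.
function dashArrayFor(savingsPercentage: number): string {
  const arc = (savingsPercentage / 100) * CIRCUMFERENCE;
  return `${arc} ${CIRCUMFERENCE}`;
}

// dashArrayFor(37.5) → "106.03… 282.74…", matching 37.5 * 2.827 in the component.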
@@ -161,3 +161,67 @@ export interface GlobalSearchResponse {
export interface ProjectListParams extends ListParams {
  visibility?: 'public' | 'private';
}

// Stats types
export interface Stats {
  total_artifacts: number;
  total_size_bytes: number;
  unique_artifacts: number;
  orphaned_artifacts: number;
  orphaned_size_bytes: number;
  total_uploads: number;
  deduplicated_uploads: number;
  deduplication_ratio: number;
  storage_saved_bytes: number;
}

export interface ReferencedArtifact {
  artifact_id: string;
  ref_count: number;
  size: number;
  original_name: string | null;
  content_type: string | null;
  storage_saved: number;
}

export interface DeduplicationStats {
  total_logical_bytes: number;
  total_physical_bytes: number;
  bytes_saved: number;
  savings_percentage: number;
  total_uploads: number;
  unique_artifacts: number;
  duplicate_uploads: number;
  average_ref_count: number;
  max_ref_count: number;
  most_referenced_artifacts: ReferencedArtifact[];
}

export interface TimelineDataPoint {
  date: string;
  uploads: number;
  deduplicated: number;
  bytes_uploaded: number;
  bytes_saved: number;
}

export interface TimelineStats {
  period: 'day' | 'week' | 'month';
  start_date: string;
  end_date: string;
  data_points: TimelineDataPoint[];
}

export interface CrossProjectDuplicate {
  artifact_id: string;
  size: number;
  original_name: string | null;
  projects: string[];
  total_references: number;
}

export interface CrossProjectStats {
  total_cross_project_duplicates: number;
  bytes_saved_cross_project: number;
  duplicates: CrossProjectDuplicate[];
}
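For context, Dashboard.tsx imports getStats and getDeduplicationStats from '../api' and expects them to resolve to the Stats and DeduplicationStats shapes defined above (presumably with bytes_saved = total_logical_bytes − total_physical_bytes and savings_percentage as the saved fraction of logical storage). A minimal sketch of how such helpers could be typed is below; the endpoint paths, API_BASE constant, getJson helper, and './types' import path are assumptions for illustration, not code from this commit:

// Hypothetical sketch of the '../api' stats helpers used by Dashboard.tsx.
// Return types come from the interfaces above; paths and helper names are assumed.
import { Stats, DeduplicationStats } from './types';

const API_BASE = '/api/v1';

async function getJson<T>(path: string): Promise<T> {
  const res = await fetch(`${API_BASE}${path}`);
  if (!res.ok) {
    throw new Error(`Request failed: ${res.status} ${res.statusText}`);
  }
  return res.json() as Promise<T>;
}

export function getStats(): Promise<Stats> {
  return getJson<Stats>('/stats');
}

export function getDeduplicationStats(): Promise<DeduplicationStats> {
  return getJson<DeduplicationStats>('/stats/deduplication');
}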