Cleanup: improve pod naming, remove dead code, update docs

Mondo Diaz
2026-01-14 19:08:58 +00:00
parent 1bb0c4e911
commit 96b77fd9c0
3 changed files with 17 additions and 124 deletions

View File

@@ -11,6 +11,20 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Added `deploy_feature` job with dynamic hostnames and unique release names (#51)
- Added `cleanup_feature` job with `on_stop` for automatic cleanup on merge (#51)
- Added `values-dev.yaml` Helm values for lightweight ephemeral environments (#51)
- Added main branch deployment to stage environment (#51)
- Added post-deployment integration tests (#51)
- Added internal proxy configuration for npm, pip, helm, and apt (#51)

### Changed
- Improved pod naming: Orchard pods now named `orchard-{env}-server-*` for clarity (#51)

### Fixed
- Fixed `cleanup_feature` job failing when branch is deleted (`GIT_STRATEGY: none`) (#51)
- Fixed gitleaks false positives with fingerprints for historical commits (#51)
- Fixed integration tests running when deploy fails (`when: on_success`) (#51)

### Removed
- Removed unused `store_streaming()` method from storage.py (#51)

## [0.4.0] - 2026-01-12
### Added
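
Note: the `deploy_feature` / `cleanup_feature` entries above describe GitLab's review-app pattern (a per-branch Helm release that is torn down via `on_stop`). The actual `.gitlab-ci.yml` is not part of this commit, so the sketch below is illustrative only: job layout, image, chart path, hostname domain, and value keys are assumptions, while `GIT_STRATEGY: none`, `on_stop`, and `when: on_success` are the mechanisms named in the changelog.

# Illustrative sketch only -- not the project's .gitlab-ci.yml (that file is not in this diff).
# Job names, image, chart path, domain, and value keys are assumptions.
stages: [deploy, verify]

deploy_feature:
  stage: deploy
  image: alpine/helm:3                              # assumed deploy image
  script:
    # Unique release name per environment; values-dev.yaml keeps the env lightweight.
    - helm upgrade --install "orchard-$CI_ENVIRONMENT_SLUG" ./chart -f values-dev.yaml --set "ingress.host=$CI_ENVIRONMENT_SLUG.example.com"
  environment:
    name: feature/$CI_COMMIT_REF_SLUG
    url: https://$CI_ENVIRONMENT_SLUG.example.com   # dynamic hostname (assumed domain)
    on_stop: cleanup_feature                        # stop job runs when the environment is stopped
  rules:
    - if: $CI_MERGE_REQUEST_IID

cleanup_feature:
  stage: deploy
  image: alpine/helm:3
  variables:
    GIT_STRATEGY: none                              # no checkout: the source branch may already be deleted
  script:
    - helm uninstall "orchard-$CI_ENVIRONMENT_SLUG"
  environment:
    name: feature/$CI_COMMIT_REF_SLUG
    action: stop
  rules:
    - if: $CI_MERGE_REQUEST_IID
      when: manual                                  # triggered automatically via on_stop on merge

integration_tests:
  stage: verify
  needs: [deploy_feature]
  when: on_success                                  # do not run the tests if the deploy failed
  script:
    - pytest tests/integration                      # assumed test entry point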

View File

@@ -6,7 +6,6 @@ from typing import (
    Optional,
    Dict,
    Any,
-    Generator,
    NamedTuple,
    Protocol,
    runtime_checkable,
@@ -511,127 +510,6 @@ class S3Storage:
            )
            raise
    def store_streaming(self, chunks: Generator[bytes, None, None]) -> StorageResult:
        """
        Store a file from a stream of chunks.
        First accumulates to compute hash, then uploads.
        For truly large files, consider using initiate_resumable_upload instead.
        """
        # Accumulate chunks and compute all hashes
        sha256_hasher = hashlib.sha256()
        md5_hasher = hashlib.md5()
        sha1_hasher = hashlib.sha1()
        all_chunks = []
        size = 0
        for chunk in chunks:
            sha256_hasher.update(chunk)
            md5_hasher.update(chunk)
            sha1_hasher.update(chunk)
            all_chunks.append(chunk)
            size += len(chunk)

        sha256_hash = sha256_hasher.hexdigest()
        md5_hash = md5_hasher.hexdigest()
        sha1_hash = sha1_hasher.hexdigest()
        s3_key = f"fruits/{sha256_hash[:2]}/{sha256_hash[2:4]}/{sha256_hash}"
        s3_etag = None

        # Check if already exists
        if self._exists(s3_key):
            obj_info = self.get_object_info(s3_key)
            s3_etag = obj_info.get("etag", "").strip('"') if obj_info else None
            return StorageResult(
                sha256=sha256_hash,
                size=size,
                s3_key=s3_key,
                md5=md5_hash,
                sha1=sha1_hash,
                s3_etag=s3_etag,
                already_existed=True,
            )

        # Upload based on size
        if size < MULTIPART_THRESHOLD:
            content = b"".join(all_chunks)
            response = self.client.put_object(
                Bucket=self.bucket, Key=s3_key, Body=content
            )
            s3_etag = response.get("ETag", "").strip('"')
        else:
            # Use multipart for large files
            mpu = self.client.create_multipart_upload(Bucket=self.bucket, Key=s3_key)
            upload_id = mpu["UploadId"]
            try:
                parts = []
                part_number = 1
                buffer = b""
                for chunk in all_chunks:
                    buffer += chunk
                    while len(buffer) >= MULTIPART_CHUNK_SIZE:
                        part_data = buffer[:MULTIPART_CHUNK_SIZE]
                        buffer = buffer[MULTIPART_CHUNK_SIZE:]
                        response = self.client.upload_part(
                            Bucket=self.bucket,
                            Key=s3_key,
                            UploadId=upload_id,
                            PartNumber=part_number,
                            Body=part_data,
                        )
                        parts.append(
                            {
                                "PartNumber": part_number,
                                "ETag": response["ETag"],
                            }
                        )
                        part_number += 1

                # Upload remaining buffer
                if buffer:
                    response = self.client.upload_part(
                        Bucket=self.bucket,
                        Key=s3_key,
                        UploadId=upload_id,
                        PartNumber=part_number,
                        Body=buffer,
                    )
                    parts.append(
                        {
                            "PartNumber": part_number,
                            "ETag": response["ETag"],
                        }
                    )

                complete_response = self.client.complete_multipart_upload(
                    Bucket=self.bucket,
                    Key=s3_key,
                    UploadId=upload_id,
                    MultipartUpload={"Parts": parts},
                )
                s3_etag = complete_response.get("ETag", "").strip('"')
            except Exception as e:
                logger.error(f"Streaming multipart upload failed: {e}")
                self.client.abort_multipart_upload(
                    Bucket=self.bucket,
                    Key=s3_key,
                    UploadId=upload_id,
                )
                raise

        return StorageResult(
            sha256=sha256_hash,
            size=size,
            s3_key=s3_key,
            md5=md5_hash,
            sha1=sha1_hash,
            s3_etag=s3_etag,
            already_existed=False,
        )

    def initiate_resumable_upload(self, expected_hash: str) -> Dict[str, Any]:
        """
        Initiate a resumable upload session.

View File

@@ -7,6 +7,7 @@ Expand the name of the chart.
{{/*
Create a default fully qualified app name.
+Appends "-server" to distinguish from subcharts (minio, postgresql, redis).
*/}}
{{- define "orchard.fullname" -}}
{{- if .Values.fullnameOverride }}
@@ -14,9 +15,9 @@ Create a default fully qualified app name.
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
-{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- printf "%s-server" .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- printf "%s-%s-server" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
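
Note: the net effect of the two changed lines above is that `orchard.fullname` now always appends `-server`, so the Orchard server's workloads no longer share a name shape with the minio, postgresql, and redis subchart resources. For a hypothetical release named `orchard-stage`, pod names would look roughly like the listing below (replica-set hashes and subchart suffixes are illustrative, not taken from this diff):

orchard-stage-server-6f9c7d8b4-x2tqp    # Orchard server (previously orchard-stage-6f9c7d8b4-x2tqp)
orchard-stage-minio-0                   # minio subchart, unchanged
orchard-stage-postgresql-0              # postgresql subchart, unchanged
orchard-stage-redis-master-0            # redis subchart, unchanged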