Dynamic size
@@ -101,7 +101,13 @@ Same as the traditional `/api/cf/deploy` endpoint.
## Client Implementation Example (JavaScript)

```javascript
-const CHUNK_SIZE = 5 * 1024 * 1024; // 5MB
+// You can use ANY chunk size - server supports variable chunk sizes!
+// Recommended: 1-2MB for Tanzu with memory constraints
+const CHUNK_SIZE = 1 * 1024 * 1024; // 1MB
+// Other options:
+// const CHUNK_SIZE = 512 * 1024; // 512KB (very safe)
+// const CHUNK_SIZE = 2 * 1024 * 1024; // 2MB (balanced)
+// const CHUNK_SIZE = 5 * 1024 * 1024; // 5MB (if you have memory)

async function deployWithChunks(jarFile, manifestFile, deploymentConfig) {
  const apiBase = 'https://your-app.example.com/api/cf';
@@ -191,7 +197,8 @@ deployWithChunks(jarInput.files[0], manifestInput.files[0], config)
import requests
import os

-CHUNK_SIZE = 5 * 1024 * 1024  # 5MB
+# You can use ANY chunk size!
+CHUNK_SIZE = 1 * 1024 * 1024  # 1MB (recommended for Tanzu)

def deploy_with_chunks(api_base, jar_path, manifest_path, deployment_config):
    # Step 1: Initialize upload session
@@ -316,13 +323,13 @@ server {

```properties
# Chunked Upload Configuration
-cf.upload.chunk.size=5242880
cf.upload.session.timeout-minutes=30
```

-- **cf.upload.chunk.size**: Size of each chunk in bytes (default: 5MB)
- **cf.upload.session.timeout-minutes**: How long inactive sessions are kept (default: 30 minutes)

+**Note:** There is NO server-side chunk size configuration. The server accepts ANY chunk size from the client. Chunks are appended sequentially as they arrive.
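For illustration, a minimal client-side sketch of the chunk math this implies; the `sliceIntoChunks` helper is hypothetical, not part of this commit:

```javascript
// Hypothetical helper: CHUNK_SIZE can be anything, because the server
// simply appends whatever bytes arrive for chunk 0, 1, 2, ...
const CHUNK_SIZE = 1 * 1024 * 1024; // 1MB, per the Tanzu recommendation above

function sliceIntoChunks(file) {
  const totalChunks = Math.ceil(file.size / CHUNK_SIZE);
  const chunks = [];
  for (let i = 0; i < totalChunks; i++) {
    // Blob.slice clamps to the end of the file, so the final chunk may be smaller
    chunks.push(file.slice(i * CHUNK_SIZE, (i + 1) * CHUNK_SIZE));
  }
  return chunks;
}
```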

## Session Management

- Upload sessions expire after 30 minutes of inactivity (configurable)
@@ -356,17 +363,22 @@ The traditional `/api/cf/deploy` endpoint remains available and functional. You

## Performance Considerations

-- **Chunk size**: 5MB is a good balance between request count and size
-  - Smaller chunks = more requests but safer for proxies
-  - Larger chunks = fewer requests but may hit proxy limits
+- **Chunk size**: Client controls this completely
+  - **Smaller chunks (512KB-1MB)**: More requests, but safer for memory-constrained servers and strict proxies
+  - **Larger chunks (5-10MB)**: Fewer requests, faster uploads, but needs more memory
+  - **Recommended for Tanzu**: 1MB (good balance for low-memory environments)
+  - **Any size works**: Server accepts variable chunk sizes

-- **Parallel uploads**: Current implementation is sequential
-  - Files are uploaded one chunk at a time
-  - Chunks must be uploaded in order (0, 1, 2, ...)
+- **Sequential upload requirement**: **CRITICAL**
+  - Chunks **MUST** be uploaded in order: 0, 1, 2, 3...
+  - Server validates and enforces sequential order
+  - Out-of-order chunks will be rejected
+  - This is necessary because chunks are appended sequentially to the file

- **Network reliability**: Chunked uploads are more resilient
  - Failed chunks can be retried individually
  - No need to re-upload the entire file on failure
  - Just retry the specific failed chunk index (see the sketch below)
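A minimal sketch of what the last two bullet groups imply for a client, assuming a hypothetical `uploadChunk(index, blob)` helper for the per-chunk request (the real request code is not shown in this diff): chunks go up strictly in order, and only the failing index is retried.

```javascript
// Hypothetical sketch: sequential upload with per-chunk retry.
// `uploadChunk(index, blob)` is a placeholder for the actual chunk request.
async function uploadAllChunks(chunks, maxAttempts = 3) {
  for (let index = 0; index < chunks.length; index++) {
    let attempt = 0;
    while (true) {
      try {
        await uploadChunk(index, chunks[index]); // must succeed before index + 1
        break;
      } catch (err) {
        attempt++;
        if (attempt >= maxAttempts) throw err; // give up on the whole upload
        // Brief linear backoff, then retry the SAME chunk index;
        // the server rejects anything out of order.
        await new Promise((resolve) => setTimeout(resolve, attempt * 1000));
      }
    }
  }
}
```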

## Monitoring

@@ -61,13 +61,21 @@ public class ChunkedUploadService {
                    fileType, fileState.getTotalChunks(), totalChunks));
        }

-        // Write chunk to file using streaming to avoid loading entire chunk into memory
+        // Write chunk to file using sequential append mode
+        // This supports variable chunk sizes - chunks MUST be uploaded in order (0, 1, 2, ...)
        Path targetPath = fileState.getTargetPath();
-        long offset = (long) chunkIndex * getChunkSize();

-        try (RandomAccessFile raf = new RandomAccessFile(targetPath.toFile(), "rw");
-             var inputStream = chunk.getInputStream()) {
-            raf.seek(offset);
+        // Verify chunks are uploaded in order
+        if (chunkIndex != fileState.getReceivedChunkCount()) {
+            throw new IllegalArgumentException(
+                    String.format("Chunks must be uploaded in order. Expected chunk %d but received %d",
+                            fileState.getReceivedChunkCount(), chunkIndex));
+        }

+        try (var inputStream = chunk.getInputStream();
+             var outputStream = Files.newOutputStream(targetPath,
+                     java.nio.file.StandardOpenOption.CREATE,
+                     java.nio.file.StandardOpenOption.APPEND)) {

            // Stream chunk data in smaller buffers to reduce memory pressure
            byte[] buffer = new byte[8192]; // 8KB buffer
@@ -75,12 +83,12 @@ public class ChunkedUploadService {
            long totalWritten = 0;

            while ((bytesRead = inputStream.read(buffer)) != -1) {
-                raf.write(buffer, 0, bytesRead);
+                outputStream.write(buffer, 0, bytesRead);
                totalWritten += bytesRead;
            }

-            log.debug("Wrote chunk {} ({} bytes) to {} at offset {}",
-                    chunkIndex, totalWritten, targetPath.getFileName(), offset);
+            log.debug("Appended chunk {} ({} bytes) to {}",
+                    chunkIndex, totalWritten, targetPath.getFileName());
        }

        fileState.markChunkReceived(chunkIndex);
@@ -160,11 +168,6 @@ public class ChunkedUploadService {
        }
    }

-    private int getChunkSize() {
-        // Default chunk size - should match client-side
-        return 5 * 1024 * 1024; // 5MB
-    }
-
    public int getActiveSessionCount() {
        return activeSessions.size();
    }
@@ -16,10 +16,10 @@ cf.cli.timeout=600
cf.cli.path=

# Chunked Upload Configuration
-# Reduced chunk size to 2MB to avoid memory issues on low-memory Tanzu instances
-cf.upload.chunk.size=2097152
# Session timeout in minutes (default: 30 minutes)
cf.upload.session.timeout-minutes=30
+# Note: Chunk size is controlled by the client, not the server.
+# Server accepts any chunk size and appends chunks sequentially.

# Logging Configuration
logging.level.root=INFO