Stream chunk uploads to disk instead of buffering them in memory
@@ -61,16 +61,26 @@ public class ChunkedUploadService {
                     fileType, fileState.getTotalChunks(), totalChunks));
         }
 
-        // Write chunk to file
+        // Write chunk to file using streaming to avoid loading entire chunk into memory
         Path targetPath = fileState.getTargetPath();
         long offset = (long) chunkIndex * getChunkSize();
 
-        try (RandomAccessFile raf = new RandomAccessFile(targetPath.toFile(), "rw")) {
+        try (RandomAccessFile raf = new RandomAccessFile(targetPath.toFile(), "rw");
+             var inputStream = chunk.getInputStream()) {
             raf.seek(offset);
-            byte[] data = chunk.getBytes();
-            raf.write(data);
+
+            // Stream chunk data in smaller buffers to reduce memory pressure
+            byte[] buffer = new byte[8192]; // 8KB buffer
+            int bytesRead;
+            long totalWritten = 0;
+
+            while ((bytesRead = inputStream.read(buffer)) != -1) {
+                raf.write(buffer, 0, bytesRead);
+                totalWritten += bytesRead;
+            }
+
             log.debug("Wrote chunk {} ({} bytes) to {} at offset {}",
-                    chunkIndex, data.length, targetPath.getFileName(), offset);
+                    chunkIndex, totalWritten, targetPath.getFileName(), offset);
         }
 
         fileState.markChunkReceived(chunkIndex);
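The hunk above swaps chunk.getBytes(), which materializes the entire chunk on the heap, for a buffered copy from chunk.getInputStream(). As a minimal sketch of the same pattern in isolation, assuming the chunk is a Spring MultipartFile (the helper name and signature below are illustrative, not taken from this commit):

import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.nio.file.Path;

import org.springframework.web.multipart.MultipartFile;

/**
 * Minimal sketch of the streaming chunk write; helper name and
 * signature are illustrative, not from the commit.
 */
public final class ChunkWriteSketch {

    static long writeChunkToFile(MultipartFile chunk, Path targetPath,
                                 int chunkIndex, long chunkSize) throws IOException {
        long offset = (long) chunkIndex * chunkSize;
        long totalWritten = 0;

        try (RandomAccessFile raf = new RandomAccessFile(targetPath.toFile(), "rw");
             InputStream in = chunk.getInputStream()) {
            // Position the file pointer at this chunk's slot in the target file
            raf.seek(offset);

            // Copy in 8 KB buffers so the full chunk never sits on the heap at once
            byte[] buffer = new byte[8192];
            int bytesRead;
            while ((bytesRead = in.read(buffer)) != -1) {
                raf.write(buffer, 0, bytesRead);
                totalWritten += bytesRead;
            }
        }
        return totalWritten;
    }
}

If the written byte count were not needed for the log line, InputStream.transferTo(OutputStream) could do the copy, but RandomAccessFile is not an OutputStream, so the explicit loop (or a positioned FileChannel write) is the simpler fit here.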
@@ -5,17 +5,19 @@ server.port=8080
 spring.application.name=cf-deployer
 
 # Multipart Configuration - for traditional single upload endpoint
-spring.servlet.multipart.max-file-size=500MB
-spring.servlet.multipart.max-request-size=500MB
+spring.servlet.multipart.max-file-size=10MB
+spring.servlet.multipart.max-request-size=10MB
 spring.servlet.multipart.enabled=true
+# Write all uploads directly to disk, not memory (prevents OutOfMemoryError)
+spring.servlet.multipart.file-size-threshold=0
 
 # Cloud Foundry CLI Configuration
 cf.cli.timeout=600
 cf.cli.path=
 
 # Chunked Upload Configuration
-# Recommended chunk size: 5MB (client-side should match this)
-cf.upload.chunk.size=5242880
+# Reduced chunk size to 2MB to avoid memory issues on low-memory Tanzu instances
+cf.upload.chunk.size=2097152
 # Session timeout in minutes (default: 30 minutes)
 cf.upload.session.timeout-minutes=30
 
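Setting spring.servlet.multipart.file-size-threshold=0 makes Spring spool every multipart part to a temporary file instead of holding it in memory, which is what lets the streamed copy in the service hunk avoid heap pressure. The commit does not show how cf.upload.chunk.size reaches getChunkSize(); one plausible wiring, offered only as an assumption, is a Spring @Value binding (class and field names below are illustrative):

import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

/**
 * Illustrative sketch only: one way the properties above could back
 * ChunkedUploadService.getChunkSize(). The actual wiring is not part of this commit.
 */
@Service
public class ChunkSizeConfigSketch {

    // Default mirrors cf.upload.chunk.size=2097152 (2 MB) from application.properties
    @Value("${cf.upload.chunk.size:2097152}")
    private long chunkSize;

    public long getChunkSize() {
        return chunkSize;
    }
}

Whichever binding is used, the client slicing the file must use the same 2,097,152-byte chunk size, because the server derives each chunk's file offset as chunkIndex * getChunkSize().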