Try out chunking
This commit is contained in:
390
CHUNKED_UPLOAD_GUIDE.md
Normal file
390
CHUNKED_UPLOAD_GUIDE.md
Normal file
@@ -0,0 +1,390 @@
|
|||||||
|
# Chunked Upload Implementation Guide
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This application now supports chunked file uploads to avoid nginx 413 "Request Entity Too Large" errors when deploying large JAR files through a load balancer.
|
||||||
|
|
||||||
|
## How It Works
|
||||||
|
|
||||||
|
Instead of uploading the entire JAR and manifest files in a single request, files are split into smaller chunks (default 5MB) and uploaded sequentially. The server reassembles the chunks before deployment.
|
||||||
|
|
||||||
|
## API Endpoints
|
||||||
|
|
||||||
|
### 1. Initialize Upload Session
|
||||||
|
**POST** `/api/cf/upload/init`
|
||||||
|
|
||||||
|
Creates a new upload session and returns a session ID.
|
||||||
|
|
||||||
|
**Request Body:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"apiEndpoint": "https://api.cf.example.com",
|
||||||
|
"username": "your-username",
|
||||||
|
"password": "your-password",
|
||||||
|
"organization": "your-org",
|
||||||
|
"space": "your-space",
|
||||||
|
"appName": "your-app",
|
||||||
|
"skipSslValidation": false
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"uploadSessionId": "550e8400-e29b-41d4-a716-446655440000",
|
||||||
|
"message": "Upload session created successfully"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Upload File Chunk
|
||||||
|
**POST** `/api/cf/upload/chunk`
|
||||||
|
|
||||||
|
Upload a single chunk of a file.
|
||||||
|
|
||||||
|
**Request Parameters:**
|
||||||
|
- `uploadSessionId` (string): The session ID from step 1
|
||||||
|
- `fileType` (string): Either "jarFile" or "manifest"
|
||||||
|
- `chunkIndex` (integer): Zero-based index of this chunk (0, 1, 2, ...)
|
||||||
|
- `totalChunks` (integer): Total number of chunks for this file
|
||||||
|
- `fileName` (string, optional): Original filename (required for jarFile)
|
||||||
|
- `chunk` (multipart file): The chunk data
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"success": true,
|
||||||
|
"uploadSessionId": "550e8400-e29b-41d4-a716-446655440000",
|
||||||
|
"fileType": "jarFile",
|
||||||
|
"chunkIndex": 0,
|
||||||
|
"totalChunks": 10,
|
||||||
|
"receivedChunks": 1,
|
||||||
|
"message": "Chunk uploaded successfully"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Get Upload Status
|
||||||
|
**GET** `/api/cf/upload/status/{uploadSessionId}`
|
||||||
|
|
||||||
|
Check the status of an upload session.
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"jarFile": {
|
||||||
|
"fileName": "myapp.jar",
|
||||||
|
"totalChunks": 10,
|
||||||
|
"receivedChunks": {
|
||||||
|
"0": true,
|
||||||
|
"1": true,
|
||||||
|
"2": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"manifest": {
|
||||||
|
"fileName": "manifest.yml",
|
||||||
|
"totalChunks": 1,
|
||||||
|
"receivedChunks": {
|
||||||
|
"0": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Finalize Upload and Deploy
|
||||||
|
**POST** `/api/cf/upload/finalize?uploadSessionId={sessionId}`
|
||||||
|
|
||||||
|
Triggers the deployment after all chunks are uploaded.
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
Same as the traditional `/api/cf/deploy` endpoint.
|
||||||
|
|
||||||
|
## Client Implementation Example (JavaScript)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const CHUNK_SIZE = 5 * 1024 * 1024; // 5MB
|
||||||
|
|
||||||
|
/**
 * Deploy an application by uploading the JAR and manifest in chunks, then
 * triggering the server-side deployment.
 *
 * @param {File} jarFile - the application JAR selected by the user
 * @param {File} manifestFile - the CF manifest file
 * @param {Object} deploymentConfig - body for /upload/init (endpoint, org, space, ...)
 * @returns {Promise<Object>} the deployment result from /upload/finalize
 * @throws {Error} if the init call fails (original destructured the response
 *         without checking it, masking failures as `undefined` session ids)
 */
async function deployWithChunks(jarFile, manifestFile, deploymentConfig) {
  const apiBase = 'https://your-app.example.com/api/cf';

  // Step 1: Initialize upload session
  const initResponse = await fetch(`${apiBase}/upload/init`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(deploymentConfig),
  });
  const initResult = await initResponse.json();
  // Fail fast: without this check a failed init yields uploadSessionId === undefined
  // and every subsequent chunk upload fails confusingly.
  if (!initResponse.ok || !initResult.success) {
    throw new Error(`Failed to initialize upload session: ${initResult.message ?? initResponse.status}`);
  }
  const { uploadSessionId } = initResult;
  console.log('Upload session created:', uploadSessionId);

  // Step 2: Upload JAR file in chunks
  await uploadFileInChunks(apiBase, uploadSessionId, 'jarFile', jarFile);

  // Step 3: Upload manifest file in chunks
  await uploadFileInChunks(apiBase, uploadSessionId, 'manifest', manifestFile);

  // Step 4: Finalize and deploy (session id is a UUID today, but encode defensively)
  const deployResponse = await fetch(
    `${apiBase}/upload/finalize?uploadSessionId=${encodeURIComponent(uploadSessionId)}`,
    { method: 'POST' }
  );

  const result = await deployResponse.json();
  console.log('Deployment result:', result);
  return result;
}
|
||||||
|
|
||||||
|
/**
 * Upload one file to the /upload/chunk endpoint, CHUNK_SIZE bytes at a time.
 * Chunks are sent sequentially because the server expects in-order delivery
 * (0, 1, 2, ...).
 *
 * @param {string} apiBase - base URL, e.g. https://host/api/cf
 * @param {string} sessionId - session id returned by /upload/init
 * @param {string} fileType - "jarFile" or "manifest"
 * @param {File} file - the file to upload
 * @throws {Error} on the first chunk whose upload is rejected
 */
async function uploadFileInChunks(apiBase, sessionId, fileType, file) {
  const totalChunks = Math.ceil(file.size / CHUNK_SIZE);
  console.log(`Uploading ${fileType}: ${file.name} (${totalChunks} chunks)`);

  for (let chunkIndex = 0; chunkIndex < totalChunks; chunkIndex++) {
    const start = chunkIndex * CHUNK_SIZE;
    const end = Math.min(start + CHUNK_SIZE, file.size);
    const chunk = file.slice(start, end);

    const formData = new FormData();
    formData.append('chunk', chunk);
    formData.append('uploadSessionId', sessionId);
    formData.append('fileType', fileType);
    // FormData values are strings on the wire; convert explicitly for clarity.
    formData.append('chunkIndex', String(chunkIndex));
    formData.append('totalChunks', String(totalChunks));
    formData.append('fileName', file.name);

    const response = await fetch(`${apiBase}/upload/chunk`, {
      method: 'POST',
      body: formData,
    });

    const result = await response.json();

    // Check success BEFORE reporting progress — the original logged
    // "uploaded" unconditionally and ignored non-2xx HTTP statuses.
    if (!response.ok || !result.success) {
      throw new Error(`Failed to upload chunk ${chunkIndex}: ${result.message ?? response.status}`);
    }
    console.log(`Chunk ${chunkIndex + 1}/${totalChunks} uploaded for ${fileType}`);
  }

  // NOTE(review): a zero-byte file yields totalChunks === 0 and sends nothing;
  // confirm the server treats such a file as an error at finalize time.
  console.log(`${fileType} upload complete`);
}
|
||||||
|
|
||||||
|
// Usage
|
||||||
|
const jarInput = document.getElementById('jarFile');
|
||||||
|
const manifestInput = document.getElementById('manifestFile');
|
||||||
|
|
||||||
|
const config = {
|
||||||
|
apiEndpoint: 'https://api.cf.example.com',
|
||||||
|
username: 'user',
|
||||||
|
password: 'pass',
|
||||||
|
organization: 'my-org',
|
||||||
|
space: 'dev',
|
||||||
|
appName: 'my-app',
|
||||||
|
skipSslValidation: false
|
||||||
|
};
|
||||||
|
|
||||||
|
deployWithChunks(jarInput.files[0], manifestInput.files[0], config)
|
||||||
|
.then(result => console.log('Success:', result))
|
||||||
|
.catch(error => console.error('Error:', error));
|
||||||
|
```
|
||||||
|
|
||||||
|
## Client Implementation Example (Python)
|
||||||
|
|
||||||
|
```python
|
||||||
|
import requests
|
||||||
|
import os
|
||||||
|
|
||||||
|
CHUNK_SIZE = 5 * 1024 * 1024 # 5MB
|
||||||
|
|
||||||
|
def deploy_with_chunks(api_base, jar_path, manifest_path, deployment_config):
    """Upload JAR + manifest in chunks, then trigger the deployment.

    Args:
        api_base: base URL, e.g. "https://host/api/cf".
        jar_path: local path to the application JAR.
        manifest_path: local path to the CF manifest.
        deployment_config: dict sent as the /upload/init JSON body.

    Returns:
        The parsed JSON deployment result from /upload/finalize.

    Raises:
        requests.HTTPError: if session initialization fails (the original
            indexed ['uploadSessionId'] without checking the HTTP status,
            turning server errors into confusing KeyErrors).
    """
    # Step 1: Initialize upload session
    response = requests.post(
        f"{api_base}/upload/init",
        json=deployment_config
    )
    response.raise_for_status()  # fail fast on HTTP errors
    session_id = response.json()['uploadSessionId']
    print(f"Upload session created: {session_id}")

    # Step 2: Upload JAR file in chunks
    upload_file_in_chunks(api_base, session_id, 'jarFile', jar_path)

    # Step 3: Upload manifest file in chunks
    upload_file_in_chunks(api_base, session_id, 'manifest', manifest_path)

    # Step 4: Finalize and deploy.  Deliberately no raise_for_status() here:
    # the server returns a 500 WITH a CfDeployResponse body on deployment
    # failure, and callers want that body.
    response = requests.post(
        f"{api_base}/upload/finalize",
        params={'uploadSessionId': session_id}
    )

    result = response.json()
    print(f"Deployment result: {result}")
    return result
|
||||||
|
|
||||||
|
def upload_file_in_chunks(api_base, session_id, file_type, file_path):
    """Upload one file to /upload/chunk, CHUNK_SIZE bytes at a time.

    Chunks are sent sequentially and in order (0, 1, 2, ...), as the
    server expects.

    Args:
        api_base: base URL, e.g. "https://host/api/cf".
        session_id: session id returned by /upload/init.
        file_type: "jarFile" or "manifest".
        file_path: local path of the file to upload.

    Raises:
        Exception: on the first chunk the server rejects.
    """
    file_size = os.path.getsize(file_path)
    # Ceiling division: the last chunk may be smaller than CHUNK_SIZE.
    total_chunks = (file_size + CHUNK_SIZE - 1) // CHUNK_SIZE
    file_name = os.path.basename(file_path)

    print(f"Uploading {file_type}: {file_name} ({total_chunks} chunks)")

    with open(file_path, 'rb') as f:
        for chunk_index in range(total_chunks):
            chunk_data = f.read(CHUNK_SIZE)

            files = {'chunk': (f'chunk_{chunk_index}', chunk_data)}
            data = {
                'uploadSessionId': session_id,
                'fileType': file_type,
                'chunkIndex': chunk_index,
                'totalChunks': total_chunks,
                'fileName': file_name
            }

            response = requests.post(
                f"{api_base}/upload/chunk",
                files=files,
                data=data
            )

            result = response.json()

            # Check success BEFORE reporting progress — the original printed
            # "uploaded" unconditionally and ignored non-2xx statuses.
            if not response.ok or not result.get('success'):
                raise Exception(
                    f"Failed to upload chunk {chunk_index}: {result.get('message')}"
                )
            print(f"Chunk {chunk_index + 1}/{total_chunks} uploaded for {file_type}")

    print(f"{file_type} upload complete")
|
||||||
|
|
||||||
|
# Usage
|
||||||
|
config = {
|
||||||
|
'apiEndpoint': 'https://api.cf.example.com',
|
||||||
|
'username': 'user',
|
||||||
|
'password': 'pass',
|
||||||
|
'organization': 'my-org',
|
||||||
|
'space': 'dev',
|
||||||
|
'appName': 'my-app',
|
||||||
|
'skipSslValidation': False
|
||||||
|
}
|
||||||
|
|
||||||
|
deploy_with_chunks(
|
||||||
|
'https://your-app.example.com/api/cf',
|
||||||
|
'/path/to/app.jar',
|
||||||
|
'/path/to/manifest.yml',
|
||||||
|
config
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Nginx Configuration
|
||||||
|
|
||||||
|
For the chunked upload to work properly with nginx, you need minimal configuration changes:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
server_name your-app.example.com;
|
||||||
|
|
||||||
|
# Important: Set client_max_body_size for individual chunks
|
||||||
|
# This should be slightly larger than your chunk size (5MB chunks -> 10MB limit)
|
||||||
|
client_max_body_size 10m;
|
||||||
|
|
||||||
|
# Increase timeouts for long deployments
|
||||||
|
proxy_read_timeout 900s;
|
||||||
|
proxy_connect_timeout 900s;
|
||||||
|
proxy_send_timeout 900s;
|
||||||
|
|
||||||
|
location /api/cf/ {
|
||||||
|
proxy_pass http://cf-deployer-backend:8080;
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
|
||||||
|
# Buffer settings for chunked uploads
|
||||||
|
proxy_buffering off;
|
||||||
|
proxy_request_buffering off;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Key Nginx Settings:
|
||||||
|
|
||||||
|
1. **client_max_body_size**: Set to ~10MB (double your chunk size for safety)
|
||||||
|
2. **proxy_buffering off**: Prevents nginx from buffering the entire request
|
||||||
|
3. **proxy_request_buffering off**: Allows streaming of request body
|
||||||
|
4. **Increased timeouts**: CF deployments can take several minutes
|
||||||
|
|
||||||
|
## Configuration Properties
|
||||||
|
|
||||||
|
### application.properties
|
||||||
|
|
||||||
|
```properties
|
||||||
|
# Chunked Upload Configuration
|
||||||
|
cf.upload.chunk.size=5242880
|
||||||
|
cf.upload.session.timeout-minutes=30
|
||||||
|
```
|
||||||
|
|
||||||
|
- **cf.upload.chunk.size**: Size of each chunk in bytes (default: 5MB)
|
||||||
|
- **cf.upload.session.timeout-minutes**: How long inactive sessions are kept (default: 30 minutes)
|
||||||
|
|
||||||
|
## Session Management
|
||||||
|
|
||||||
|
- Upload sessions expire after 30 minutes of inactivity (configurable)
|
||||||
|
- Expired sessions are automatically cleaned up every 5 minutes
|
||||||
|
- Sessions are deleted after successful deployment
|
||||||
|
- Each session maintains its own temporary directory
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
### Common Errors:
|
||||||
|
|
||||||
|
1. **"Upload session not found or expired"**
|
||||||
|
- Session timed out (default: 30 minutes)
|
||||||
|
- Invalid session ID
|
||||||
|
- Solution: Create a new upload session
|
||||||
|
|
||||||
|
2. **"Upload incomplete. Not all file chunks received"**
|
||||||
|
- Not all chunks were uploaded before calling finalize
|
||||||
|
- Solution: Check upload status and retry missing chunks
|
||||||
|
|
||||||
|
3. **"Total chunks mismatch"**
|
||||||
|
- Different totalChunks value sent for the same file
|
||||||
|
- Solution: Ensure consistent totalChunks across all chunk uploads
|
||||||
|
|
||||||
|
## Migration from Traditional Upload
|
||||||
|
|
||||||
|
The traditional `/api/cf/deploy` endpoint remains available and functional. You can:
|
||||||
|
|
||||||
|
1. **Keep using the traditional endpoint** for deployments behind nginx if you increase nginx `client_max_body_size` to 500MB+
|
||||||
|
2. **Migrate to chunked uploads** for better reliability and to avoid nginx 413 errors without increasing limits
|
||||||
|
|
||||||
|
## Performance Considerations
|
||||||
|
|
||||||
|
- **Chunk size**: 5MB is a good balance between request count and size
|
||||||
|
- Smaller chunks = more requests but safer for proxies
|
||||||
|
- Larger chunks = fewer requests but may hit proxy limits
|
||||||
|
|
||||||
|
- **Parallel uploads**: Current implementation is sequential
|
||||||
|
- Files are uploaded one chunk at a time
|
||||||
|
- Chunks must be uploaded in order (0, 1, 2, ...)
|
||||||
|
|
||||||
|
- **Network reliability**: Chunked uploads are more resilient
|
||||||
|
- Failed chunks can be retried individually
|
||||||
|
- No need to re-upload the entire file on failure
|
||||||
|
|
||||||
|
## Monitoring
|
||||||
|
|
||||||
|
Check active upload sessions:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# The ChunkedUploadService tracks active sessions
|
||||||
|
# Monitor via application logs or add a custom endpoint
|
||||||
|
```
|
||||||
|
|
||||||
|
Example log output:
|
||||||
|
```
2025-10-21 10:15:30 - Created upload session: 550e8400-e29b-41d4-a716-446655440000
2025-10-21 10:15:31 - Session 550e8400...: Received chunk 1/10 for jarFile (5242880 bytes)
2025-10-21 10:15:32 - Session 550e8400...: Received chunk 2/10 for jarFile (5242880 bytes)
...
2025-10-21 10:16:00 - Session 550e8400...: File jarFile upload completed (10 chunks)
2025-10-21 10:16:01 - Starting deployment for app: my-app from session: 550e8400...
2025-10-21 10:18:00 - Deployment completed successfully
2025-10-21 10:18:00 - Deleted upload session: 550e8400-e29b-41d4-a716-446655440000
```
|
||||||
@@ -2,8 +2,10 @@ package com.cfdeployer;
|
|||||||
|
|
||||||
import org.springframework.boot.SpringApplication;
|
import org.springframework.boot.SpringApplication;
|
||||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||||
|
import org.springframework.scheduling.annotation.EnableScheduling;
|
||||||
|
|
||||||
@SpringBootApplication
|
@SpringBootApplication
|
||||||
|
@EnableScheduling
|
||||||
public class CfDeployerApplication {
|
public class CfDeployerApplication {
|
||||||
|
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
|
|||||||
@@ -2,7 +2,10 @@ package com.cfdeployer.controller;
|
|||||||
|
|
||||||
import com.cfdeployer.model.CfDeployRequest;
|
import com.cfdeployer.model.CfDeployRequest;
|
||||||
import com.cfdeployer.model.CfDeployResponse;
|
import com.cfdeployer.model.CfDeployResponse;
|
||||||
|
import com.cfdeployer.model.ChunkUploadRequest;
|
||||||
|
import com.cfdeployer.model.ChunkUploadResponse;
|
||||||
import com.cfdeployer.service.CfCliService;
|
import com.cfdeployer.service.CfCliService;
|
||||||
|
import com.cfdeployer.service.ChunkedUploadService;
|
||||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||||
import jakarta.validation.Valid;
|
import jakarta.validation.Valid;
|
||||||
import lombok.RequiredArgsConstructor;
|
import lombok.RequiredArgsConstructor;
|
||||||
@@ -22,6 +25,7 @@ public class CfDeployController {
|
|||||||
private static final Logger log = LoggerFactory.getLogger(CfDeployController.class);
|
private static final Logger log = LoggerFactory.getLogger(CfDeployController.class);
|
||||||
|
|
||||||
private final CfCliService cfCliService;
|
private final CfCliService cfCliService;
|
||||||
|
private final ChunkedUploadService chunkedUploadService;
|
||||||
private final ObjectMapper objectMapper;
|
private final ObjectMapper objectMapper;
|
||||||
|
|
||||||
@PostMapping(value = "/deploy", consumes = MediaType.MULTIPART_FORM_DATA_VALUE)
|
@PostMapping(value = "/deploy", consumes = MediaType.MULTIPART_FORM_DATA_VALUE)
|
||||||
@@ -118,6 +122,126 @@ public class CfDeployController {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Chunked upload endpoints

/**
 * Creates a new chunked-upload session.
 *
 * The request body is the same JSON a traditional /deploy call would carry
 * (CfDeployRequest).  It is parsed here purely for validation and logging;
 * the raw JSON string is stored with the session and re-parsed at finalize
 * time.
 *
 * @param requestJson raw JSON body describing the deployment target
 * @return 200 with the new uploadSessionId, or 400 on any failure
 */
@PostMapping("/upload/init")
public ResponseEntity<ChunkUploadResponse> initUpload(@RequestBody String requestJson) {
    try {
        log.info("Initializing chunked upload session");

        // Validate the request JSON — malformed configs are rejected here
        // rather than surfacing later at finalize time.
        CfDeployRequest request = objectMapper.readValue(requestJson, CfDeployRequest.class);
        log.info("Creating upload session for app: {}", request.getAppName());

        String sessionId = chunkedUploadService.createUploadSession(requestJson);

        return ResponseEntity.ok(ChunkUploadResponse.builder()
                .success(true)
                .uploadSessionId(sessionId)
                .message("Upload session created successfully")
                .build());
    } catch (Exception e) {
        log.error("Error initializing upload session", e);
        // NOTE(review): server-side I/O failures from createUploadSession also
        // land here as 400; arguably those should be 500 — confirm intended.
        return ResponseEntity.status(HttpStatus.BAD_REQUEST)
                .body(ChunkUploadResponse.failure("Failed to initialize upload: " + e.getMessage()));
    }
}
|
||||||
|
|
||||||
|
@PostMapping("/upload/chunk")
|
||||||
|
public ResponseEntity<ChunkUploadResponse> uploadChunk(
|
||||||
|
@RequestParam("uploadSessionId") String uploadSessionId,
|
||||||
|
@RequestParam("fileType") String fileType,
|
||||||
|
@RequestParam("chunkIndex") Integer chunkIndex,
|
||||||
|
@RequestParam("totalChunks") Integer totalChunks,
|
||||||
|
@RequestParam(value = "fileName", required = false) String fileName,
|
||||||
|
@RequestPart("chunk") MultipartFile chunk) {
|
||||||
|
try {
|
||||||
|
log.debug("Receiving chunk {}/{} for session: {}, fileType: {}",
|
||||||
|
chunkIndex + 1, totalChunks, uploadSessionId, fileType);
|
||||||
|
|
||||||
|
// Validate file type
|
||||||
|
if (!fileType.equals("jarFile") && !fileType.equals("manifest")) {
|
||||||
|
throw new IllegalArgumentException("Invalid file type. Must be 'jarFile' or 'manifest'");
|
||||||
|
}
|
||||||
|
|
||||||
|
chunkedUploadService.uploadChunk(uploadSessionId, fileType, fileName,
|
||||||
|
chunkIndex, totalChunks, chunk);
|
||||||
|
|
||||||
|
var session = chunkedUploadService.getSession(uploadSessionId);
|
||||||
|
var fileState = session.getFileStates().get(fileType);
|
||||||
|
|
||||||
|
return ResponseEntity.ok(ChunkUploadResponse.success(
|
||||||
|
uploadSessionId, fileType, chunkIndex, totalChunks,
|
||||||
|
fileState.getReceivedChunkCount()));
|
||||||
|
} catch (Exception e) {
|
||||||
|
log.error("Error uploading chunk", e);
|
||||||
|
return ResponseEntity.status(HttpStatus.BAD_REQUEST)
|
||||||
|
.body(ChunkUploadResponse.failure("Failed to upload chunk: " + e.getMessage()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@PostMapping("/upload/finalize")
|
||||||
|
public ResponseEntity<CfDeployResponse> finalizeUpload(@RequestParam("uploadSessionId") String uploadSessionId) {
|
||||||
|
try {
|
||||||
|
log.info("Finalizing upload for session: {}", uploadSessionId);
|
||||||
|
|
||||||
|
if (!chunkedUploadService.isSessionReady(uploadSessionId)) {
|
||||||
|
return ResponseEntity.status(HttpStatus.BAD_REQUEST)
|
||||||
|
.body(CfDeployResponse.failure("Upload incomplete. Not all file chunks received.", null));
|
||||||
|
}
|
||||||
|
|
||||||
|
var session = chunkedUploadService.getSession(uploadSessionId);
|
||||||
|
if (session == null) {
|
||||||
|
return ResponseEntity.status(HttpStatus.NOT_FOUND)
|
||||||
|
.body(CfDeployResponse.failure("Upload session not found or expired", null));
|
||||||
|
}
|
||||||
|
|
||||||
|
CfDeployRequest request = objectMapper.readValue(session.getRequestJson(), CfDeployRequest.class);
|
||||||
|
log.info("Starting deployment for app: {} from session: {}",
|
||||||
|
request.getAppName(), uploadSessionId);
|
||||||
|
|
||||||
|
// Get file paths from session
|
||||||
|
var jarState = session.getFileStates().get("jarFile");
|
||||||
|
var manifestState = session.getFileStates().get("manifest");
|
||||||
|
|
||||||
|
CfDeployResponse response = cfCliService.deployApplicationFromPaths(
|
||||||
|
request,
|
||||||
|
jarState.getTargetPath(),
|
||||||
|
manifestState.getTargetPath());
|
||||||
|
|
||||||
|
// Clean up session after deployment
|
||||||
|
chunkedUploadService.deleteSession(uploadSessionId);
|
||||||
|
|
||||||
|
if (Boolean.TRUE.equals(response.getSuccess())) {
|
||||||
|
return ResponseEntity.ok(response);
|
||||||
|
} else {
|
||||||
|
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(response);
|
||||||
|
}
|
||||||
|
} catch (Exception e) {
|
||||||
|
log.error("Error finalizing upload", e);
|
||||||
|
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
|
||||||
|
.body(CfDeployResponse.failure(
|
||||||
|
"Failed to finalize deployment: " + e.getMessage(),
|
||||||
|
e.toString()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@GetMapping("/upload/status/{uploadSessionId}")
|
||||||
|
public ResponseEntity<?> getUploadStatus(@PathVariable String uploadSessionId) {
|
||||||
|
try {
|
||||||
|
var session = chunkedUploadService.getSession(uploadSessionId);
|
||||||
|
if (session == null) {
|
||||||
|
return ResponseEntity.status(HttpStatus.NOT_FOUND)
|
||||||
|
.body("Upload session not found or expired");
|
||||||
|
}
|
||||||
|
|
||||||
|
return ResponseEntity.ok(session.getFileStates());
|
||||||
|
} catch (Exception e) {
|
||||||
|
log.error("Error getting upload status", e);
|
||||||
|
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
|
||||||
|
.body("Failed to get upload status: " + e.getMessage());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
private void validateFiles(MultipartFile jarFile, MultipartFile manifest) {
|
private void validateFiles(MultipartFile jarFile, MultipartFile manifest) {
|
||||||
if (jarFile.isEmpty()) {
|
if (jarFile.isEmpty()) {
|
||||||
throw new IllegalArgumentException("JAR file is empty");
|
throw new IllegalArgumentException("JAR file is empty");
|
||||||
|
|||||||
26
src/main/java/com/cfdeployer/model/ChunkUploadRequest.java
Normal file
26
src/main/java/com/cfdeployer/model/ChunkUploadRequest.java
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
package com.cfdeployer.model;
|
||||||
|
|
||||||
|
import jakarta.validation.constraints.Min;
|
||||||
|
import jakarta.validation.constraints.NotBlank;
|
||||||
|
import jakarta.validation.constraints.NotNull;
|
||||||
|
import lombok.Data;
|
||||||
|
|
||||||
|
/**
 * Request parameters for a single chunk upload (/api/cf/upload/chunk).
 *
 * Mirrors the multipart form fields documented in CHUNKED_UPLOAD_GUIDE.md.
 */
@Data
public class ChunkUploadRequest {

    @NotBlank(message = "Upload session ID is required")
    private String uploadSessionId;

    @NotBlank(message = "File type is required (jarFile or manifest)")
    private String fileType; // "jarFile" or "manifest"

    // Zero-based position of this chunk within the file.
    @NotNull(message = "Chunk index is required")
    @Min(value = 0, message = "Chunk index must be non-negative")
    private Integer chunkIndex;

    // Must be identical across every chunk of the same file.
    @NotNull(message = "Total chunks is required")
    @Min(value = 1, message = "Total chunks must be at least 1")
    private Integer totalChunks;

    // Original filename; per the guide this is required for jarFile uploads.
    private String fileName;
}
|
||||||
41
src/main/java/com/cfdeployer/model/ChunkUploadResponse.java
Normal file
41
src/main/java/com/cfdeployer/model/ChunkUploadResponse.java
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
package com.cfdeployer.model;
|
||||||
|
|
||||||
|
import lombok.AllArgsConstructor;
|
||||||
|
import lombok.Builder;
|
||||||
|
import lombok.Data;
|
||||||
|
import lombok.NoArgsConstructor;
|
||||||
|
|
||||||
|
@Data
|
||||||
|
@Builder
|
||||||
|
@NoArgsConstructor
|
||||||
|
@AllArgsConstructor
|
||||||
|
public class ChunkUploadResponse {
|
||||||
|
|
||||||
|
private Boolean success;
|
||||||
|
private String uploadSessionId;
|
||||||
|
private String fileType;
|
||||||
|
private Integer chunkIndex;
|
||||||
|
private Integer totalChunks;
|
||||||
|
private Integer receivedChunks;
|
||||||
|
private String message;
|
||||||
|
|
||||||
|
public static ChunkUploadResponse success(String uploadSessionId, String fileType,
|
||||||
|
Integer chunkIndex, Integer totalChunks, Integer receivedChunks) {
|
||||||
|
return ChunkUploadResponse.builder()
|
||||||
|
.success(true)
|
||||||
|
.uploadSessionId(uploadSessionId)
|
||||||
|
.fileType(fileType)
|
||||||
|
.chunkIndex(chunkIndex)
|
||||||
|
.totalChunks(totalChunks)
|
||||||
|
.receivedChunks(receivedChunks)
|
||||||
|
.message("Chunk uploaded successfully")
|
||||||
|
.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
public static ChunkUploadResponse failure(String message) {
|
||||||
|
return ChunkUploadResponse.builder()
|
||||||
|
.success(false)
|
||||||
|
.message(message)
|
||||||
|
.build();
|
||||||
|
}
|
||||||
|
}
|
||||||
61
src/main/java/com/cfdeployer/model/UploadSession.java
Normal file
61
src/main/java/com/cfdeployer/model/UploadSession.java
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
package com.cfdeployer.model;
|
||||||
|
|
||||||
|
import lombok.Data;
|
||||||
|
|
||||||
|
import java.nio.file.Path;
|
||||||
|
import java.time.LocalDateTime;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.concurrent.ConcurrentHashMap;
|
||||||
|
|
||||||
|
@Data
|
||||||
|
public class UploadSession {
|
||||||
|
|
||||||
|
private String sessionId;
|
||||||
|
private String requestJson;
|
||||||
|
private Path workingDirectory;
|
||||||
|
private LocalDateTime createdAt;
|
||||||
|
private LocalDateTime lastAccessedAt;
|
||||||
|
|
||||||
|
// File type -> chunk tracking
|
||||||
|
private Map<String, FileUploadState> fileStates;
|
||||||
|
|
||||||
|
public UploadSession(String sessionId, String requestJson, Path workingDirectory) {
|
||||||
|
this.sessionId = sessionId;
|
||||||
|
this.requestJson = requestJson;
|
||||||
|
this.workingDirectory = workingDirectory;
|
||||||
|
this.createdAt = LocalDateTime.now();
|
||||||
|
this.lastAccessedAt = LocalDateTime.now();
|
||||||
|
this.fileStates = new ConcurrentHashMap<>();
|
||||||
|
}
|
||||||
|
|
||||||
|
public void updateLastAccessed() {
|
||||||
|
this.lastAccessedAt = LocalDateTime.now();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Data
|
||||||
|
public static class FileUploadState {
|
||||||
|
private String fileName;
|
||||||
|
private int totalChunks;
|
||||||
|
private Map<Integer, Boolean> receivedChunks;
|
||||||
|
private Path targetPath;
|
||||||
|
|
||||||
|
public FileUploadState(String fileName, int totalChunks, Path targetPath) {
|
||||||
|
this.fileName = fileName;
|
||||||
|
this.totalChunks = totalChunks;
|
||||||
|
this.targetPath = targetPath;
|
||||||
|
this.receivedChunks = new ConcurrentHashMap<>();
|
||||||
|
}
|
||||||
|
|
||||||
|
public void markChunkReceived(int chunkIndex) {
|
||||||
|
receivedChunks.put(chunkIndex, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
public boolean isComplete() {
|
||||||
|
return receivedChunks.size() == totalChunks;
|
||||||
|
}
|
||||||
|
|
||||||
|
public int getReceivedChunkCount() {
|
||||||
|
return receivedChunks.size();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -84,6 +84,41 @@ public class CfCliService {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
 * Deploys an application using files already on disk (the chunked-upload
 * path), rather than multipart request files.
 *
 * Runs the CF CLI sequence login -> push -> logout, accumulating CLI output
 * into a single string that is returned with the response.  All failures
 * are caught and converted into a failure response rather than thrown.
 *
 * @param request      deployment target (endpoint, org, space, app name, credentials)
 * @param jarPath      path to the reassembled application JAR
 * @param manifestPath path to the reassembled CF manifest
 * @return success response carrying the CLI output, or a failure response
 *         with the error message and exception string
 */
public CfDeployResponse deployApplicationFromPaths(CfDeployRequest request, Path jarPath, Path manifestPath) {
    try {
        log.info("=== Starting deployment from paths for app: {} ===", request.getAppName());
        log.info("Target: {}/{}/{}", request.getApiEndpoint(), request.getOrganization(), request.getSpace());
        log.info("JAR path: {}", jarPath);
        log.info("Manifest path: {}", manifestPath);

        // Validate files exist before touching the CLI at all.
        if (!Files.exists(jarPath)) {
            throw new IOException("JAR file not found at: " + jarPath);
        }
        if (!Files.exists(manifestPath)) {
            throw new IOException("Manifest file not found at: " + manifestPath);
        }

        log.info("JAR file size: {} bytes", Files.size(jarPath));
        log.info("Manifest file size: {} bytes", Files.size(manifestPath));

        // Shared buffer: each CLI step appends its output here.
        StringBuilder output = new StringBuilder();

        // Order matters: push runs inside the logged-in session; the push uses
        // the manifest's parent directory as its working directory.
        login(request, output);
        pushApplication(request, manifestPath.getParent(), jarPath, output);
        logout(output);

        log.info("=== Deployment completed successfully for app: {} ===", request.getAppName());
        return CfDeployResponse.success(output.toString());

    } catch (Exception e) {
        // NOTE(review): if login succeeded but push failed, no logout is
        // attempted here — confirm whether a lingering CLI session matters.
        log.error("=== Deployment failed for app: {} ===", request.getAppName());
        log.error("Error type: {}", e.getClass().getName());
        log.error("Error message: {}", e.getMessage(), e);
        return CfDeployResponse.failure(e.getMessage(), e.toString());
    }
}
|
||||||
|
|
||||||
private void login(CfDeployRequest request, StringBuilder output) throws Exception {
|
private void login(CfDeployRequest request, StringBuilder output) throws Exception {
|
||||||
log.info("Logging into Cloud Foundry at: {}", request.getApiEndpoint());
|
log.info("Logging into Cloud Foundry at: {}", request.getApiEndpoint());
|
||||||
|
|
||||||
@@ -224,7 +259,8 @@ public class CfCliService {
|
|||||||
log.info("Created temp file: {}", tempFile.getAbsolutePath());
|
log.info("Created temp file: {}", tempFile.getAbsolutePath());
|
||||||
|
|
||||||
// Copy from direct file path to temp file
|
// Copy from direct file path to temp file
|
||||||
long bytesCopied = Files.copy(directPath, tempFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
|
Files.copy(directPath, tempFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
|
||||||
|
long bytesCopied = Files.size(tempFile.toPath());
|
||||||
log.info("Copied CF CLI to temp file: {} ({} bytes)", tempFile.getAbsolutePath(), bytesCopied);
|
log.info("Copied CF CLI to temp file: {} ({} bytes)", tempFile.getAbsolutePath(), bytesCopied);
|
||||||
|
|
||||||
if (bytesCopied == 0) {
|
if (bytesCopied == 0) {
|
||||||
|
|||||||
161
src/main/java/com/cfdeployer/service/ChunkedUploadService.java
Normal file
161
src/main/java/com/cfdeployer/service/ChunkedUploadService.java
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
package com.cfdeployer.service;

import com.cfdeployer.model.UploadSession;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.LocalDateTime;
import java.util.Comparator;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Stream;
|
||||||
|
|
||||||
|
@Service
|
||||||
|
@Slf4j
|
||||||
|
public class ChunkedUploadService {
|
||||||
|
|
||||||
|
@Value("${cf.upload.session.timeout-minutes:30}")
|
||||||
|
private int sessionTimeoutMinutes;
|
||||||
|
|
||||||
|
private final Map<String, UploadSession> activeSessions = new ConcurrentHashMap<>();
|
||||||
|
|
||||||
|
public String createUploadSession(String requestJson) throws IOException {
|
||||||
|
String sessionId = UUID.randomUUID().toString();
|
||||||
|
Path workingDir = Files.createTempDirectory("cf-upload-" + sessionId);
|
||||||
|
|
||||||
|
UploadSession session = new UploadSession(sessionId, requestJson, workingDir);
|
||||||
|
activeSessions.put(sessionId, session);
|
||||||
|
|
||||||
|
log.info("Created upload session: {} at {}", sessionId, workingDir);
|
||||||
|
return sessionId;
|
||||||
|
}
|
||||||
|
|
||||||
|
public synchronized void uploadChunk(String sessionId, String fileType, String fileName,
|
||||||
|
int chunkIndex, int totalChunks, MultipartFile chunk) throws IOException {
|
||||||
|
UploadSession session = activeSessions.get(sessionId);
|
||||||
|
if (session == null) {
|
||||||
|
throw new IllegalArgumentException("Upload session not found or expired: " + sessionId);
|
||||||
|
}
|
||||||
|
|
||||||
|
session.updateLastAccessed();
|
||||||
|
|
||||||
|
// Get or create file upload state
|
||||||
|
UploadSession.FileUploadState fileState = session.getFileStates()
|
||||||
|
.computeIfAbsent(fileType, k -> {
|
||||||
|
String targetFileName = fileType.equals("manifest") ? "manifest.yml" : fileName;
|
||||||
|
Path targetPath = session.getWorkingDirectory().resolve(targetFileName);
|
||||||
|
return new UploadSession.FileUploadState(fileName, totalChunks, targetPath);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Validate total chunks consistency
|
||||||
|
if (fileState.getTotalChunks() != totalChunks) {
|
||||||
|
throw new IllegalArgumentException(
|
||||||
|
String.format("Total chunks mismatch for %s: expected %d, got %d",
|
||||||
|
fileType, fileState.getTotalChunks(), totalChunks));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write chunk to file
|
||||||
|
Path targetPath = fileState.getTargetPath();
|
||||||
|
long offset = (long) chunkIndex * getChunkSize();
|
||||||
|
|
||||||
|
try (RandomAccessFile raf = new RandomAccessFile(targetPath.toFile(), "rw")) {
|
||||||
|
raf.seek(offset);
|
||||||
|
byte[] data = chunk.getBytes();
|
||||||
|
raf.write(data);
|
||||||
|
log.debug("Wrote chunk {} ({} bytes) to {} at offset {}",
|
||||||
|
chunkIndex, data.length, targetPath.getFileName(), offset);
|
||||||
|
}
|
||||||
|
|
||||||
|
fileState.markChunkReceived(chunkIndex);
|
||||||
|
log.info("Session {}: Received chunk {}/{} for {} ({} bytes)",
|
||||||
|
sessionId, chunkIndex + 1, totalChunks, fileType, chunk.getSize());
|
||||||
|
|
||||||
|
if (fileState.isComplete()) {
|
||||||
|
log.info("Session {}: File {} upload completed ({} chunks)",
|
||||||
|
sessionId, fileType, totalChunks);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public UploadSession getSession(String sessionId) {
|
||||||
|
UploadSession session = activeSessions.get(sessionId);
|
||||||
|
if (session != null) {
|
||||||
|
session.updateLastAccessed();
|
||||||
|
}
|
||||||
|
return session;
|
||||||
|
}
|
||||||
|
|
||||||
|
public boolean isSessionReady(String sessionId) {
|
||||||
|
UploadSession session = activeSessions.get(sessionId);
|
||||||
|
if (session == null) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if both jarFile and manifest are complete
|
||||||
|
UploadSession.FileUploadState jarState = session.getFileStates().get("jarFile");
|
||||||
|
UploadSession.FileUploadState manifestState = session.getFileStates().get("manifest");
|
||||||
|
|
||||||
|
return jarState != null && jarState.isComplete() &&
|
||||||
|
manifestState != null && manifestState.isComplete();
|
||||||
|
}
|
||||||
|
|
||||||
|
public void deleteSession(String sessionId) {
|
||||||
|
UploadSession session = activeSessions.remove(sessionId);
|
||||||
|
if (session != null) {
|
||||||
|
cleanupSessionDirectory(session);
|
||||||
|
log.info("Deleted upload session: {}", sessionId);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Scheduled(fixedRate = 300000) // Run every 5 minutes
|
||||||
|
public void cleanupExpiredSessions() {
|
||||||
|
LocalDateTime expirationTime = LocalDateTime.now().minusMinutes(sessionTimeoutMinutes);
|
||||||
|
int cleanedCount = 0;
|
||||||
|
|
||||||
|
for (Map.Entry<String, UploadSession> entry : activeSessions.entrySet()) {
|
||||||
|
if (entry.getValue().getLastAccessedAt().isBefore(expirationTime)) {
|
||||||
|
deleteSession(entry.getKey());
|
||||||
|
cleanedCount++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (cleanedCount > 0) {
|
||||||
|
log.info("Cleaned up {} expired upload sessions", cleanedCount);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private void cleanupSessionDirectory(UploadSession session) {
|
||||||
|
try {
|
||||||
|
Path workingDir = session.getWorkingDirectory();
|
||||||
|
if (Files.exists(workingDir)) {
|
||||||
|
Files.walk(workingDir)
|
||||||
|
.sorted(Comparator.reverseOrder())
|
||||||
|
.forEach(path -> {
|
||||||
|
try {
|
||||||
|
Files.delete(path);
|
||||||
|
} catch (IOException e) {
|
||||||
|
log.warn("Failed to delete file: {}", path, e);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
log.debug("Cleaned up session directory: {}", workingDir);
|
||||||
|
}
|
||||||
|
} catch (IOException e) {
|
||||||
|
log.warn("Failed to clean up session directory for session: {}", session.getSessionId(), e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private int getChunkSize() {
|
||||||
|
// Default chunk size - should match client-side
|
||||||
|
return 5 * 1024 * 1024; // 5MB
|
||||||
|
}
|
||||||
|
|
||||||
|
public int getActiveSessionCount() {
|
||||||
|
return activeSessions.size();
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -4,7 +4,7 @@ server.port=8080
|
|||||||
# Application Name
|
# Application Name
|
||||||
spring.application.name=cf-deployer
|
spring.application.name=cf-deployer
|
||||||
|
|
||||||
# Multipart Configuration
|
# Multipart Configuration - for traditional single upload endpoint
|
||||||
spring.servlet.multipart.max-file-size=500MB
|
spring.servlet.multipart.max-file-size=500MB
|
||||||
spring.servlet.multipart.max-request-size=500MB
|
spring.servlet.multipart.max-request-size=500MB
|
||||||
spring.servlet.multipart.enabled=true
|
spring.servlet.multipart.enabled=true
|
||||||
@@ -13,6 +13,12 @@ spring.servlet.multipart.enabled=true
|
|||||||
cf.cli.timeout=600
|
cf.cli.timeout=600
|
||||||
cf.cli.path=
|
cf.cli.path=
|
||||||
|
|
||||||
|
# Chunked Upload Configuration
|
||||||
|
# Recommended chunk size: 5MB (client-side should match this)
|
||||||
|
cf.upload.chunk.size=5242880
|
||||||
|
# Session timeout in minutes (default: 30 minutes)
|
||||||
|
cf.upload.session.timeout-minutes=30
|
||||||
|
|
||||||
# Logging Configuration
|
# Logging Configuration
|
||||||
logging.level.root=INFO
|
logging.level.root=INFO
|
||||||
logging.level.com.cfdeployer=DEBUG
|
logging.level.com.cfdeployer=DEBUG
|
||||||
|
|||||||
Reference in New Issue
Block a user