Compare commits
23 Commits
7d1132cb78
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
086a77daab | ||
|
|
2c9641ea8a | ||
|
|
c35cd7092a | ||
|
|
23dacdd0c2 | ||
|
|
4e24d93a5a | ||
|
|
05d452c250 | ||
|
|
da16787fb2 | ||
|
|
7df4064cf5 | ||
|
|
4c2f46a725 | ||
|
|
4db1285f88 | ||
|
|
d0f9139146 | ||
|
|
c3554e84ab | ||
|
|
ab07cd6d92 | ||
|
|
401d53da50 | ||
|
|
b395b310e1 | ||
|
|
f1910b36cb | ||
|
|
304a781c19 | ||
|
|
72708bbf91 | ||
|
|
34567cd813 | ||
|
|
b81ab51100 | ||
|
|
6e3684b580 | ||
|
|
b05bee8f28 | ||
|
|
fdcc92eeb6 |
@@ -6,7 +6,10 @@
|
||||
"Bash(gradle wrapper:*)",
|
||||
"Bash(./gradlew build:*)",
|
||||
"Bash(./gradlew clean build:*)",
|
||||
"Bash(jar:*)"
|
||||
"Bash(jar:*)",
|
||||
"Bash(git log:*)",
|
||||
"Bash(dir /B *.sh *.md)",
|
||||
"Bash(chmod:*)"
|
||||
],
|
||||
"deny": [],
|
||||
"ask": []
|
||||
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -76,3 +76,7 @@ src/main/resources/cf-cli/
|
||||
# Test output
|
||||
test-output/
|
||||
target/
|
||||
frontend/package-lock.json
|
||||
|
||||
# Frontend local development environment (contains sensitive credentials)
|
||||
frontend/src/environments/environment.local.ts
|
||||
|
||||
402
CHUNKED_UPLOAD_GUIDE.md
Normal file
402
CHUNKED_UPLOAD_GUIDE.md
Normal file
@@ -0,0 +1,402 @@
|
||||
# Chunked Upload Implementation Guide
|
||||
|
||||
## Overview
|
||||
|
||||
This application now supports chunked file uploads to avoid nginx 413 "Request Entity Too Large" errors when deploying large JAR files through a load balancer.
|
||||
|
||||
## How It Works
|
||||
|
||||
Instead of uploading the entire JAR and manifest files in a single request, files are split into smaller chunks (default 5MB) and uploaded sequentially. The server reassembles the chunks before deployment.
|
||||
|
||||
## API Endpoints
|
||||
|
||||
### 1. Initialize Upload Session
|
||||
**POST** `/api/cf/upload/init`
|
||||
|
||||
Creates a new upload session and returns a session ID.
|
||||
|
||||
**Request Body:**
|
||||
```json
|
||||
{
|
||||
"apiEndpoint": "https://api.cf.example.com",
|
||||
"username": "your-username",
|
||||
"password": "your-password",
|
||||
"organization": "your-org",
|
||||
"space": "your-space",
|
||||
"appName": "your-app",
|
||||
"skipSslValidation": false
|
||||
}
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"uploadSessionId": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"message": "Upload session created successfully"
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Upload File Chunk
|
||||
**POST** `/api/cf/upload/chunk`
|
||||
|
||||
Upload a single chunk of a file.
|
||||
|
||||
**Request Parameters:**
|
||||
- `uploadSessionId` (string): The session ID from step 1
|
||||
- `fileType` (string): Either "jarFile" or "manifest"
|
||||
- `chunkIndex` (integer): Zero-based index of this chunk (0, 1, 2, ...)
|
||||
- `totalChunks` (integer): Total number of chunks for this file
|
||||
- `fileName` (string, optional): Original filename (required for jarFile)
|
||||
- `chunk` (multipart file): The chunk data
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"uploadSessionId": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"fileType": "jarFile",
|
||||
"chunkIndex": 0,
|
||||
"totalChunks": 10,
|
||||
"receivedChunks": 1,
|
||||
"message": "Chunk uploaded successfully"
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Get Upload Status
|
||||
**GET** `/api/cf/upload/status/{uploadSessionId}`
|
||||
|
||||
Check the status of an upload session.
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"jarFile": {
|
||||
"fileName": "myapp.jar",
|
||||
"totalChunks": 10,
|
||||
"receivedChunks": {
|
||||
"0": true,
|
||||
"1": true,
|
||||
"2": true
|
||||
}
|
||||
},
|
||||
"manifest": {
|
||||
"fileName": "manifest.yml",
|
||||
"totalChunks": 1,
|
||||
"receivedChunks": {
|
||||
"0": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 4. Finalize Upload and Deploy
|
||||
**POST** `/api/cf/upload/finalize?uploadSessionId={sessionId}`
|
||||
|
||||
Triggers the deployment after all chunks are uploaded.
|
||||
|
||||
**Response:**
|
||||
Same as the traditional `/api/cf/deploy` endpoint.
|
||||
|
||||
## Client Implementation Example (JavaScript)
|
||||
|
||||
```javascript
|
||||
// You can use ANY chunk size - server supports variable chunk sizes!
|
||||
// Recommended: 1-2MB for Tanzu with memory constraints
|
||||
const CHUNK_SIZE = 1 * 1024 * 1024; // 1MB
|
||||
// Other options:
|
||||
// const CHUNK_SIZE = 512 * 1024; // 512KB (very safe)
|
||||
// const CHUNK_SIZE = 2 * 1024 * 1024; // 2MB (balanced)
|
||||
// const CHUNK_SIZE = 5 * 1024 * 1024; // 5MB (if you have memory)
|
||||
|
||||
async function deployWithChunks(jarFile, manifestFile, deploymentConfig) {
|
||||
const apiBase = 'https://your-app.example.com/api/cf';
|
||||
|
||||
// Step 1: Initialize upload session
|
||||
const initResponse = await fetch(`${apiBase}/upload/init`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(deploymentConfig)
|
||||
});
|
||||
|
||||
const { uploadSessionId } = await initResponse.json();
|
||||
console.log('Upload session created:', uploadSessionId);
|
||||
|
||||
// Step 2: Upload JAR file in chunks
|
||||
await uploadFileInChunks(apiBase, uploadSessionId, 'jarFile', jarFile);
|
||||
|
||||
// Step 3: Upload manifest file in chunks
|
||||
await uploadFileInChunks(apiBase, uploadSessionId, 'manifest', manifestFile);
|
||||
|
||||
// Step 4: Finalize and deploy
|
||||
const deployResponse = await fetch(
|
||||
`${apiBase}/upload/finalize?uploadSessionId=${uploadSessionId}`,
|
||||
{ method: 'POST' }
|
||||
);
|
||||
|
||||
const result = await deployResponse.json();
|
||||
console.log('Deployment result:', result);
|
||||
return result;
|
||||
}
|
||||
|
||||
async function uploadFileInChunks(apiBase, sessionId, fileType, file) {
|
||||
const totalChunks = Math.ceil(file.size / CHUNK_SIZE);
|
||||
console.log(`Uploading ${fileType}: ${file.name} (${totalChunks} chunks)`);
|
||||
|
||||
for (let chunkIndex = 0; chunkIndex < totalChunks; chunkIndex++) {
|
||||
const start = chunkIndex * CHUNK_SIZE;
|
||||
const end = Math.min(start + CHUNK_SIZE, file.size);
|
||||
const chunk = file.slice(start, end);
|
||||
|
||||
const formData = new FormData();
|
||||
formData.append('chunk', chunk);
|
||||
formData.append('uploadSessionId', sessionId);
|
||||
formData.append('fileType', fileType);
|
||||
formData.append('chunkIndex', chunkIndex);
|
||||
formData.append('totalChunks', totalChunks);
|
||||
formData.append('fileName', file.name);
|
||||
|
||||
const response = await fetch(`${apiBase}/upload/chunk`, {
|
||||
method: 'POST',
|
||||
body: formData
|
||||
});
|
||||
|
||||
const result = await response.json();
|
||||
console.log(`Chunk ${chunkIndex + 1}/${totalChunks} uploaded for ${fileType}`);
|
||||
|
||||
if (!result.success) {
|
||||
throw new Error(`Failed to upload chunk: ${result.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`${fileType} upload complete`);
|
||||
}
|
||||
|
||||
// Usage
|
||||
const jarInput = document.getElementById('jarFile');
|
||||
const manifestInput = document.getElementById('manifestFile');
|
||||
|
||||
const config = {
|
||||
apiEndpoint: 'https://api.cf.example.com',
|
||||
username: 'user',
|
||||
password: 'pass',
|
||||
organization: 'my-org',
|
||||
space: 'dev',
|
||||
appName: 'my-app',
|
||||
skipSslValidation: false
|
||||
};
|
||||
|
||||
deployWithChunks(jarInput.files[0], manifestInput.files[0], config)
|
||||
.then(result => console.log('Success:', result))
|
||||
.catch(error => console.error('Error:', error));
|
||||
```
|
||||
|
||||
## Client Implementation Example (Python)
|
||||
|
||||
```python
|
||||
import requests
|
||||
import os
|
||||
|
||||
# You can use ANY chunk size!
|
||||
CHUNK_SIZE = 1 * 1024 * 1024 # 1MB (recommended for Tanzu)
|
||||
|
||||
def deploy_with_chunks(api_base, jar_path, manifest_path, deployment_config):
|
||||
# Step 1: Initialize upload session
|
||||
response = requests.post(
|
||||
f"{api_base}/upload/init",
|
||||
json=deployment_config
|
||||
)
|
||||
session_id = response.json()['uploadSessionId']
|
||||
print(f"Upload session created: {session_id}")
|
||||
|
||||
# Step 2: Upload JAR file in chunks
|
||||
upload_file_in_chunks(api_base, session_id, 'jarFile', jar_path)
|
||||
|
||||
# Step 3: Upload manifest file in chunks
|
||||
upload_file_in_chunks(api_base, session_id, 'manifest', manifest_path)
|
||||
|
||||
# Step 4: Finalize and deploy
|
||||
response = requests.post(
|
||||
f"{api_base}/upload/finalize",
|
||||
params={'uploadSessionId': session_id}
|
||||
)
|
||||
|
||||
result = response.json()
|
||||
print(f"Deployment result: {result}")
|
||||
return result
|
||||
|
||||
def upload_file_in_chunks(api_base, session_id, file_type, file_path):
|
||||
file_size = os.path.getsize(file_path)
|
||||
total_chunks = (file_size + CHUNK_SIZE - 1) // CHUNK_SIZE
|
||||
file_name = os.path.basename(file_path)
|
||||
|
||||
print(f"Uploading {file_type}: {file_name} ({total_chunks} chunks)")
|
||||
|
||||
with open(file_path, 'rb') as f:
|
||||
for chunk_index in range(total_chunks):
|
||||
chunk_data = f.read(CHUNK_SIZE)
|
||||
|
||||
files = {'chunk': (f'chunk_{chunk_index}', chunk_data)}
|
||||
data = {
|
||||
'uploadSessionId': session_id,
|
||||
'fileType': file_type,
|
||||
'chunkIndex': chunk_index,
|
||||
'totalChunks': total_chunks,
|
||||
'fileName': file_name
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"{api_base}/upload/chunk",
|
||||
files=files,
|
||||
data=data
|
||||
)
|
||||
|
||||
result = response.json()
|
||||
print(f"Chunk {chunk_index + 1}/{total_chunks} uploaded for {file_type}")
|
||||
|
||||
if not result.get('success'):
|
||||
raise Exception(f"Failed to upload chunk: {result.get('message')}")
|
||||
|
||||
print(f"{file_type} upload complete")
|
||||
|
||||
# Usage
|
||||
config = {
|
||||
'apiEndpoint': 'https://api.cf.example.com',
|
||||
'username': 'user',
|
||||
'password': 'pass',
|
||||
'organization': 'my-org',
|
||||
'space': 'dev',
|
||||
'appName': 'my-app',
|
||||
'skipSslValidation': False
|
||||
}
|
||||
|
||||
deploy_with_chunks(
|
||||
'https://your-app.example.com/api/cf',
|
||||
'/path/to/app.jar',
|
||||
'/path/to/manifest.yml',
|
||||
config
|
||||
)
|
||||
```
|
||||
|
||||
## Nginx Configuration
|
||||
|
||||
For the chunked upload to work properly with nginx, you need minimal configuration changes:
|
||||
|
||||
```nginx
|
||||
server {
|
||||
listen 80;
|
||||
server_name your-app.example.com;
|
||||
|
||||
# Important: Set client_max_body_size for individual chunks
|
||||
# This should be slightly larger than your chunk size (5MB chunks -> 10MB limit)
|
||||
client_max_body_size 10m;
|
||||
|
||||
# Increase timeouts for long deployments
|
||||
proxy_read_timeout 900s;
|
||||
proxy_connect_timeout 900s;
|
||||
proxy_send_timeout 900s;
|
||||
|
||||
location /api/cf/ {
|
||||
proxy_pass http://cf-deployer-backend:8080;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
# Buffer settings for chunked uploads
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Key Nginx Settings:
|
||||
|
||||
1. **client_max_body_size**: Set to ~10MB (double your chunk size for safety)
|
||||
2. **proxy_buffering off**: Prevents nginx from buffering the entire request
|
||||
3. **proxy_request_buffering off**: Allows streaming of request body
|
||||
4. **Increased timeouts**: CF deployments can take several minutes
|
||||
|
||||
## Configuration Properties
|
||||
|
||||
### application.properties
|
||||
|
||||
```properties
|
||||
# Chunked Upload Configuration
|
||||
cf.upload.session.timeout-minutes=30
|
||||
```
|
||||
|
||||
- **cf.upload.session.timeout-minutes**: How long inactive sessions are kept (default: 30 minutes)
|
||||
|
||||
**Note:** There is NO server-side chunk size configuration. The server accepts ANY chunk size from the client. Chunks are appended sequentially as they arrive.
|
||||
|
||||
## Session Management
|
||||
|
||||
- Upload sessions expire after 30 minutes of inactivity (configurable)
|
||||
- Expired sessions are automatically cleaned up every 5 minutes
|
||||
- Sessions are deleted after successful deployment
|
||||
- Each session maintains its own temporary directory
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Errors:
|
||||
|
||||
1. **"Upload session not found or expired"**
|
||||
- Session timed out (default: 30 minutes)
|
||||
- Invalid session ID
|
||||
- Solution: Create a new upload session
|
||||
|
||||
2. **"Upload incomplete. Not all file chunks received"**
|
||||
- Not all chunks were uploaded before calling finalize
|
||||
- Solution: Check upload status and retry missing chunks
|
||||
|
||||
3. **"Total chunks mismatch"**
|
||||
- Different totalChunks value sent for the same file
|
||||
- Solution: Ensure consistent totalChunks across all chunk uploads
|
||||
|
||||
## Migration from Traditional Upload
|
||||
|
||||
The traditional `/api/cf/deploy` endpoint remains available and functional. You can:
|
||||
|
||||
1. **Keep using the traditional endpoint** for deployments behind nginx if you increase nginx `client_max_body_size` to 500MB+
|
||||
2. **Migrate to chunked uploads** for better reliability and to avoid nginx 413 errors without increasing limits
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
- **Chunk size**: Client controls this completely
|
||||
- **Smaller chunks (512KB-1MB)**: More requests, but safer for memory-constrained servers and strict proxies
|
||||
- **Larger chunks (5-10MB)**: Fewer requests, faster uploads, but needs more memory
|
||||
- **Recommended for Tanzu**: 1MB (good balance for low-memory environments)
|
||||
- **Any size works**: Server accepts variable chunk sizes
|
||||
|
||||
- **Sequential upload requirement**: **CRITICAL**
|
||||
- Chunks **MUST** be uploaded in order: 0, 1, 2, 3...
|
||||
- Server validates and enforces sequential order
|
||||
- Out-of-order chunks will be rejected
|
||||
- This is necessary because chunks are appended sequentially to the file
|
||||
|
||||
- **Network reliability**: Chunked uploads are more resilient
|
||||
- Failed chunks can be retried individually
|
||||
- No need to re-upload the entire file on failure
|
||||
- Just retry the specific failed chunk index
|
||||
|
||||
## Monitoring
|
||||
|
||||
Check active upload sessions:
|
||||
|
||||
```bash
|
||||
# The ChunkedUploadService tracks active sessions
|
||||
# Monitor via application logs or add a custom endpoint
|
||||
```
|
||||
|
||||
Example log output:
|
||||
```
|
||||
2025-10-21 10:15:30 - Created upload session: 550e8400-e29b-41d4-a716-446655440000
|
||||
2025-10-21 10:15:31 - Session 550e8400...: Received chunk 1/10 for jarFile (5242880 bytes)
|
||||
2025-10-21 10:15:32 - Session 550e8400...: Received chunk 2/10 for jarFile (5242880 bytes)
|
||||
...
|
||||
2025-10-21 10:16:00 - Session 550e8400...: File jarFile upload completed (10 chunks)
|
||||
2025-10-21 10:16:01 - Starting deployment for app: my-app from session: 550e8400...
|
||||
2025-10-21 10:18:00 - Deployment completed successfully
|
||||
2025-10-21 10:18:00 - Deleted upload session: 550e8400-e29b-41d4-a716-446655440000
|
||||
```
|
||||
112
CHUNK_SIZE_GUIDE.md
Normal file
112
CHUNK_SIZE_GUIDE.md
Normal file
@@ -0,0 +1,112 @@
|
||||
# Chunk Size Quick Reference
|
||||
|
||||
## TL;DR
|
||||
|
||||
**You control the chunk size in your upload script.** The server doesn't care what size you use - it accepts ANY chunk size and reassembles them sequentially.
|
||||
|
||||
## Recommended Sizes
|
||||
|
||||
| Environment | Chunk Size | Reason |
|
||||
|-------------|-----------|--------|
|
||||
| **Tanzu (low memory)** | **1MB** | Safe for 10MB direct memory limit |
|
||||
| **Strict nginx** | **512KB - 1MB** | Works with any nginx config |
|
||||
| **Normal setup** | **2-5MB** | Good balance of speed vs safety |
|
||||
| **High bandwidth** | **5-10MB** | Faster uploads, fewer requests |
|
||||
|
||||
## Setting Chunk Size
|
||||
|
||||
### Bash Script
|
||||
```bash
|
||||
CHUNK_SIZE=1048576 # 1MB
|
||||
```
|
||||
|
||||
### JavaScript
|
||||
```javascript
|
||||
const CHUNK_SIZE = 1 * 1024 * 1024; // 1MB
|
||||
```
|
||||
|
||||
### Python
|
||||
```python
|
||||
CHUNK_SIZE = 1 * 1024 * 1024 # 1MB
|
||||
```
|
||||
|
||||
## Common Sizes in Bytes
|
||||
|
||||
| Size | Bytes | Setting |
|
||||
|------|-------|---------|
|
||||
| 100KB | 102,400 | `CHUNK_SIZE=102400` |
|
||||
| 256KB | 262,144 | `CHUNK_SIZE=262144` |
|
||||
| 512KB | 524,288 | `CHUNK_SIZE=524288` |
|
||||
| **1MB** | **1,048,576** | **`CHUNK_SIZE=1048576`** ✅ |
|
||||
| **2MB** | **2,097,152** | **`CHUNK_SIZE=2097152`** ✅ |
|
||||
| 5MB | 5,242,880 | `CHUNK_SIZE=5242880` |
|
||||
| 10MB | 10,485,760 | `CHUNK_SIZE=10485760` |
|
||||
|
||||
## Trade-offs
|
||||
|
||||
### Smaller Chunks (100KB - 1MB)
|
||||
✅ Less memory per request
|
||||
✅ Works with ANY nginx config
|
||||
✅ Safe for Tanzu low-memory instances
|
||||
❌ More HTTP requests
|
||||
❌ Slower overall upload
|
||||
|
||||
### Larger Chunks (5MB - 10MB)
|
||||
✅ Fewer HTTP requests
|
||||
✅ Faster overall upload
|
||||
❌ More memory needed
|
||||
❌ May exceed nginx limits
|
||||
❌ Can cause OutOfMemoryError on Tanzu
|
||||
|
||||
## For Your Tanzu Issue
|
||||
|
||||
Based on your `OutOfMemoryError: Cannot reserve 10485760 bytes of direct buffer memory`:
|
||||
|
||||
**Use 1MB chunks:**
|
||||
```bash
|
||||
CHUNK_SIZE=1048576 # 1MB
|
||||
```
|
||||
|
||||
This keeps each request under 1MB, well below your 10MB direct memory limit, leaving plenty of headroom for multiple concurrent requests and garbage collection delays.
|
||||
|
||||
## Testing
|
||||
|
||||
Quick test with different chunk sizes:
|
||||
|
||||
```bash
|
||||
# Test with 512KB chunks
|
||||
CHUNK_SIZE=524288 ./deploy-chunked.sh
|
||||
|
||||
# Test with 1MB chunks
|
||||
CHUNK_SIZE=1048576 ./deploy-chunked.sh
|
||||
|
||||
# Test with 2MB chunks
|
||||
CHUNK_SIZE=2097152 ./deploy-chunked.sh
|
||||
```
|
||||
|
||||
Watch the logs:
|
||||
```bash
|
||||
cf logs cf-deployer --recent | grep "Received chunk"
|
||||
```
|
||||
|
||||
If you see OutOfMemoryError, use smaller chunks.
|
||||
|
||||
## Rules
|
||||
|
||||
1. **Chunks MUST be uploaded in order**: 0, 1, 2, 3... (enforced by server)
|
||||
2. **All chunks of the same file MUST use the same chunk size** (except the last chunk, which can be smaller)
|
||||
3. **Different files can use different chunk sizes** (jarFile vs manifest can differ)
|
||||
4. **Total chunks must be accurate**: Calculate as `ceil(file_size / chunk_size)`
|
||||
|
||||
## Example
|
||||
|
||||
For a 50MB JAR file:
|
||||
|
||||
| Chunk Size | Number of Chunks | Total Requests |
|
||||
|-----------|-----------------|----------------|
|
||||
| 512KB | 100 chunks | ~100 requests |
|
||||
| 1MB | 50 chunks | ~50 requests |
|
||||
| 2MB | 25 chunks | ~25 requests |
|
||||
| 5MB | 10 chunks | ~10 requests |
|
||||
|
||||
All work equally well - pick based on your constraints!
|
||||
298
DEPLOYMENT_SCRIPTS.md
Normal file
298
DEPLOYMENT_SCRIPTS.md
Normal file
@@ -0,0 +1,298 @@
|
||||
# CF Deployer - Deployment Scripts Documentation
|
||||
|
||||
## Overview
|
||||
|
||||
This repository contains a Spring Boot application for deploying JAR files to Cloud Foundry/Tanzu environments using chunked uploads. There are two deployment scripts designed for different network paths.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────┐
|
||||
│ deploy-chunked.sh │──────► nginx ──────► Spring Boot App
|
||||
│ (Direct to nginx) │ (multipart endpoint)
|
||||
└─────────────────────┘
|
||||
|
||||
┌─────────────────────────┐
|
||||
│deploy-chunked-simple.sh │──► Java Proxy ──► nginx ──► Spring Boot App
|
||||
│ (Through Java proxy) │ (adds headers) (base64 endpoint)
|
||||
└─────────────────────────┘
|
||||
```
|
||||
|
||||
## Deployment Scripts
|
||||
|
||||
### 1. deploy-chunked.sh
|
||||
**Use when**: Direct access to nginx endpoint with cert/key/headers
|
||||
|
||||
**Features**:
|
||||
- Sends chunks as multipart form data (`-F` flags)
|
||||
- Supports client certificates (`--cert`, `--key`)
|
||||
- Supports custom headers (`X-Forwarded-For`, `My-APIM-KEY`)
|
||||
- Uses Spring Boot endpoint: `POST /upload/chunk` (multipart)
|
||||
|
||||
**Configuration**:
|
||||
```bash
|
||||
# Lines 49-55
|
||||
CERT_FILE="/path/to/cert.pem"
|
||||
KEY_FILE="/path/to/key.pem"
|
||||
X_FORWARDED_FOR="192.168.1.100"
|
||||
MY_APIM_KEY="your-api-key"
|
||||
```
|
||||
|
||||
**Curl format**:
|
||||
```bash
|
||||
curl -X POST /upload/chunk \
|
||||
--cert cert.pem --key key.pem \
|
||||
-H "X-Forwarded-For: ..." \
|
||||
-H "My-APIM-KEY: ..." \
|
||||
-F "uploadSessionId=..." \
|
||||
-F "fileType=..." \
|
||||
-F "chunk=@chunk_file"
|
||||
```
|
||||
|
||||
### 2. deploy-chunked-simple.sh
|
||||
**Use when**: Going through Java proxy that adds cert/headers automatically
|
||||
|
||||
**Features**:
|
||||
- Sends chunks as Base64-encoded text (to work with Java proxy)
|
||||
- Query parameters in URL (for Java proxy's `request.getQueryString()`)
|
||||
- No cert/key/headers needed (Java proxy adds them)
|
||||
- Uses Spring Boot endpoint: `POST /upload/chunk` (text/plain, base64)
|
||||
|
||||
**Configuration**:
|
||||
```bash
|
||||
# Line 24
|
||||
API_BASE="https://myapp.com/v1/utility"
|
||||
```
|
||||
|
||||
**Curl format**:
|
||||
```bash
|
||||
curl -X POST "/upload/chunk?uploadSessionId=...&fileType=..." \
|
||||
-H "Content-Type: text/plain" \
|
||||
-H "X-Chunk-Encoding: base64" \
|
||||
-d @base64_chunk_file
|
||||
```
|
||||
|
||||
## Why Two Different Scripts?
|
||||
|
||||
### The Java Proxy Problem
|
||||
|
||||
The Java proxy that sits in front of the Spring Boot app reads the request body as a String:
|
||||
|
||||
```java
|
||||
@RequestBody(required = false) String body
|
||||
```
|
||||
|
||||
**Problem**: Binary multipart data gets corrupted when read as String
|
||||
**Solution**: Base64 encode chunks as text before sending through the proxy
|
||||
|
||||
### Deploy Script Comparison
|
||||
|
||||
| Feature | deploy-chunked.sh | deploy-chunked-simple.sh |
|
||||
|---------|-------------------|--------------------------|
|
||||
| Network Path | Direct to nginx | Through Java proxy |
|
||||
| Chunk Format | Multipart binary | Base64 text |
|
||||
| Query Params | No (uses `-F` form fields) | Yes (in URL) |
|
||||
| Cert/Key | Required in script | Added by Java proxy |
|
||||
| Headers | Required in script | Added by Java proxy |
|
||||
| Spring Endpoint | multipart/form-data | text/plain |
|
||||
|
||||
## Spring Boot Endpoints
|
||||
|
||||
The Spring Boot app has **three** chunk upload endpoints:
|
||||
|
||||
### 1. Multipart Endpoint (Original)
|
||||
```java
|
||||
@PostMapping("/upload/chunk")
|
||||
// Consumes: multipart/form-data
|
||||
// Parameters: All as form fields (-F)
|
||||
// File: @RequestPart("chunk") MultipartFile
|
||||
```
|
||||
**Used by**: deploy-chunked.sh (direct to nginx)
|
||||
|
||||
### 2. Raw Binary Endpoint
|
||||
```java
|
||||
@PostMapping(value = "/upload/chunk", consumes = "application/octet-stream")
|
||||
// Consumes: application/octet-stream
|
||||
// Parameters: Query params in URL
|
||||
// File: @RequestBody byte[]
|
||||
```
|
||||
**Used by**: Not currently used (would fail through Java proxy)
|
||||
|
||||
### 3. Base64 Text Endpoint
|
||||
```java
|
||||
@PostMapping(value = "/upload/chunk", consumes = "text/plain")
|
||||
// Consumes: text/plain
|
||||
// Parameters: Query params in URL
|
||||
// File: @RequestBody String (Base64 decoded)
|
||||
// Header: X-Chunk-Encoding: base64
|
||||
```
|
||||
**Used by**: deploy-chunked-simple.sh (through Java proxy)
|
||||
|
||||
Spring Boot routes to the correct endpoint based on the `Content-Type` header!
|
||||
|
||||
## Common Configuration (Both Scripts)
|
||||
|
||||
Both scripts share these configuration options:
|
||||
|
||||
```bash
|
||||
# Files to deploy
|
||||
JAR_FILE="./app.jar"
|
||||
MANIFEST_FILE="./manifest.yml"
|
||||
|
||||
# Chunk size (1MB recommended for Tanzu)
|
||||
CHUNK_SIZE=1048576
|
||||
|
||||
# Cloud Foundry configuration
|
||||
CF_API_ENDPOINT="https://api.cf.example.com"
|
||||
CF_USERNAME="your-username"
|
||||
CF_PASSWORD="your-password"
|
||||
CF_ORGANIZATION="your-org"
|
||||
CF_SPACE="your-space"
|
||||
CF_APP_NAME="your-app"
|
||||
CF_SKIP_SSL="false"
|
||||
|
||||
# Polling configuration
|
||||
POLL_INTERVAL=5
|
||||
MAX_WAIT=600
|
||||
|
||||
# Debug mode
|
||||
DEBUG_MODE="false" # Set to "true" for verbose output
|
||||
```
|
||||
|
||||
## Deployment Flow
|
||||
|
||||
Both scripts follow the same 5-step process:
|
||||
|
||||
1. **Initialize Upload Session**: POST `/upload/init` with CF credentials
|
||||
2. **Upload JAR Chunks**: POST `/upload/chunk` for each chunk
|
||||
3. **Upload Manifest Chunks**: POST `/upload/chunk` for manifest.yml
|
||||
4. **Finalize Upload**: POST `/upload/finalize?uploadSessionId=...&async=true`
|
||||
5. **Poll Deployment Status**: GET `/deployment/status/{uploadSessionId}`
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "Required part 'chunk' is not present"
|
||||
- **Cause**: Nginx stripped multipart body or wrong Content-Type
|
||||
- **Solution**: Use deploy-chunked-simple.sh with Base64 encoding
|
||||
|
||||
### "504 Gateway Timeout" on chunk upload
|
||||
- **Cause**: Java proxy trying to read binary data as String
|
||||
- **Solution**: Use Base64 encoding (deploy-chunked-simple.sh)
|
||||
|
||||
### "Argument list too long"
|
||||
- **Cause**: Base64 string passed as command argument instead of file
|
||||
- **Solution**: Already fixed - script writes Base64 to temp file and uses `-d @file`
|
||||
|
||||
### "Missing uploadSessionId parameter"
|
||||
- **Cause**: Nginx or proxy stripping query parameters
|
||||
- **For deploy-chunked.sh**: Parameters should be in form fields (`-F`)
|
||||
- **For deploy-chunked-simple.sh**: Parameters should be in query string (`?uploadSessionId=...`)
|
||||
|
||||
## Technical Notes
|
||||
|
||||
### Why Not Fix the Java Proxy?
|
||||
|
||||
The Java proxy is shared by multiple services, so modifying it could break other applications. Instead, we adapted the deployment script to work with the proxy's limitations.
|
||||
|
||||
### Why Base64 Encoding?
|
||||
|
||||
When the Java proxy reads binary data as `@RequestBody String body`, it:
|
||||
- Corrupts binary data (non-UTF8 bytes)
|
||||
- May hang or timeout on large binary payloads
|
||||
- Cannot properly forward multipart boundaries
|
||||
|
||||
Base64 encoding converts binary to safe ASCII text that the proxy can handle as a String.
|
||||
|
||||
### Why Query Parameters for Simple Script?
|
||||
|
||||
The Java proxy reconstructs the request using:
|
||||
```java
|
||||
String queryParams = request.getQueryString();
|
||||
String completeRequest = WSGURL + req;
|
||||
if (queryParams != null) {
|
||||
completeRequest = completeRequest + "?" + queryParams;
|
||||
}
|
||||
```
|
||||
|
||||
It only forwards query parameters, not form field parameters, so we must use query strings.
|
||||
|
||||
### Performance Impact of Base64
|
||||
|
||||
Base64 encoding increases payload size by ~33%:
|
||||
- 1MB binary chunk → ~1.33MB Base64 text
|
||||
- Adds CPU overhead for encoding/decoding
|
||||
- Acceptable tradeoff for proxy compatibility
|
||||
|
||||
## Testing
|
||||
|
||||
### Test deploy-chunked.sh (Direct to nginx)
|
||||
```bash
|
||||
# Configure cert/key/headers in script
|
||||
vim deploy-chunked.sh
|
||||
|
||||
# Run with debug
|
||||
DEBUG_MODE="true" ./deploy-chunked.sh
|
||||
```
|
||||
|
||||
### Test deploy-chunked-simple.sh (Through proxy)
|
||||
```bash
|
||||
# Configure API base URL
|
||||
vim deploy-chunked-simple.sh
|
||||
|
||||
# Run with debug
|
||||
DEBUG_MODE="true" ./deploy-chunked-simple.sh
|
||||
```
|
||||
|
||||
## Dependencies
|
||||
|
||||
### Shell Requirements
|
||||
- `bash` 4.0+
|
||||
- `curl`
|
||||
- `awk` (replaces `bc` for file size calculation)
|
||||
- `base64` (for deploy-chunked-simple.sh)
|
||||
- `mktemp`
|
||||
- `split`
|
||||
- `stat`
|
||||
|
||||
### Backend Requirements
|
||||
- Spring Boot 3.2.0+
|
||||
- Java 17+
|
||||
- Gradle 8.14
|
||||
|
||||
## File Reference
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `deploy-chunked.sh` | Direct nginx deployment with cert/headers |
|
||||
| `deploy-chunked-simple.sh` | Java proxy deployment with Base64 |
|
||||
| `CfDeployController.java` | REST endpoints (3 chunk upload variants) |
|
||||
| `ChunkedUploadService.java` | Chunk processing (multipart + raw bytes) |
|
||||
| `AsyncDeploymentService.java` | Background deployment execution |
|
||||
|
||||
## Quick Start
|
||||
|
||||
**For direct nginx access**:
|
||||
```bash
|
||||
cp deploy-chunked.sh my-deploy.sh
|
||||
# Edit configuration
|
||||
vim my-deploy.sh
|
||||
# Run
|
||||
./my-deploy.sh
|
||||
```
|
||||
|
||||
**For Java proxy access**:
|
||||
```bash
|
||||
cp deploy-chunked-simple.sh my-deploy.sh
|
||||
# Edit API_BASE
|
||||
vim my-deploy.sh
|
||||
# Run
|
||||
./my-deploy.sh
|
||||
```
|
||||
|
||||
## Support
|
||||
|
||||
For issues or questions:
|
||||
1. Enable `DEBUG_MODE="true"` in the script
|
||||
2. Check the curl commands and responses
|
||||
3. Review Spring Boot application logs
|
||||
4. Verify nginx/proxy logs for request forwarding
|
||||
150
MEMORY_FIX.md
Normal file
150
MEMORY_FIX.md
Normal file
@@ -0,0 +1,150 @@
|
||||
# Fix for OutOfMemoryError: Cannot reserve direct buffer memory
|
||||
|
||||
## Problem
|
||||
```
|
||||
java.lang.OutOfMemoryError: Cannot reserve 10485760 bytes of direct buffer memory
|
||||
```
|
||||
|
||||
This occurs because Tanzu's default JVM configuration allocates very little direct (off-heap) memory, and multipart file uploads use direct buffers.
|
||||
|
||||
## Solutions Applied
|
||||
|
||||
### 1. Code Changes (Already Applied)
|
||||
✅ **ChunkedUploadService.java** - Changed to stream chunks in 8KB buffers instead of loading entire chunk into memory
|
||||
✅ **MultipartConfig.java** - Added configuration to write all uploads directly to disk (`file-size-threshold=0`)
|
||||
✅ **application.properties** - Reduced chunk size from 5MB to 2MB and enabled disk-based uploads
|
||||
|
||||
### 2. Tanzu Manifest Configuration (You Need to Apply)
|
||||
|
||||
**Option A: Set in manifest.yml**
|
||||
|
||||
Create or update your `manifest.yml`:
|
||||
|
||||
```yaml
|
||||
applications:
|
||||
- name: cf-deployer
|
||||
memory: 1G
|
||||
instances: 1
|
||||
path: build/libs/cf-deployer.jar
|
||||
buildpacks:
|
||||
- java_buildpack
|
||||
env:
|
||||
# Increase direct memory allocation
|
||||
JAVA_TOOL_OPTIONS: "-XX:MaxDirectMemorySize=256m -XX:+UseG1GC"
|
||||
# Alternative if using Java Buildpack Memory Calculator
|
||||
JBP_CONFIG_OPEN_JDK_JRE: '{ jre: { version: 17.+ }, memory_calculator: { memory_sizes: { metaspace: 128m, direct: 256m } } }'
|
||||
```
|
||||
|
||||
Then deploy:
|
||||
```bash
|
||||
cf push
|
||||
```
|
||||
|
||||
**Option B: Set environment variable directly**
|
||||
|
||||
```bash
|
||||
# Increase direct memory to 256MB
|
||||
cf set-env cf-deployer JAVA_TOOL_OPTIONS "-XX:MaxDirectMemorySize=256m -XX:+UseG1GC"
|
||||
|
||||
# Restage to apply changes
|
||||
cf restage cf-deployer
|
||||
```
|
||||
|
||||
**Option C: Increase overall memory**
|
||||
|
||||
If you have more memory available:
|
||||
```bash
|
||||
# Increase app memory to 2GB (gives more headroom)
|
||||
cf scale cf-deployer -m 2G
|
||||
|
||||
# Or in manifest.yml
|
||||
memory: 2G
|
||||
```
|
||||
|
||||
### 3. Client-Side Changes
|
||||
|
||||
Update your client to use 2MB chunks instead of 5MB:
|
||||
|
||||
**Bash script:**
|
||||
```bash
|
||||
CHUNK_SIZE=2097152 # 2MB instead of 5MB
|
||||
```
|
||||
|
||||
**JavaScript:**
|
||||
```javascript
|
||||
const CHUNK_SIZE = 2 * 1024 * 1024; // 2MB
|
||||
```
|
||||
|
||||
**Python:**
|
||||
```python
|
||||
CHUNK_SIZE = 2 * 1024 * 1024 # 2MB
|
||||
```
|
||||
|
||||
## Verification
|
||||
|
||||
After applying fixes, check the logs:
|
||||
|
||||
```bash
|
||||
cf logs cf-deployer --recent
|
||||
```
|
||||
|
||||
You should see successful chunk uploads:
|
||||
```
|
||||
2025-10-21 16:30:00 - Session xxx: Received chunk 1/50 for jarFile (2097152 bytes)
|
||||
2025-10-21 16:30:01 - Session xxx: Received chunk 2/50 for jarFile (2097152 bytes)
|
||||
```
|
||||
|
||||
## Why This Works
|
||||
|
||||
1. **`file-size-threshold=0`** - Spring writes uploads directly to disk instead of buffering in memory
|
||||
2. **Streaming chunks** - We read and write in 8KB buffers instead of loading entire chunk
|
||||
3. **Smaller chunks** - 2MB chunks use less memory than 5MB chunks
|
||||
4. **Increased direct memory** - More headroom for JVM's direct buffers
|
||||
5. **G1GC** - Better garbage collection for managing off-heap memory
|
||||
|
||||
## Testing
|
||||
|
||||
Test with a small file first:
|
||||
```bash
|
||||
# Create test session
|
||||
SESSION_ID=$(curl -s -X POST https://your-app.apps.cf.example.com/api/cf/upload/init \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"apiEndpoint":"https://api.cf.example.com","username":"user","password":"pass","organization":"org","space":"space","appName":"test","skipSslValidation":false}' \
|
||||
| grep -o '"uploadSessionId":"[^"]*' | cut -d'"' -f4)
|
||||
|
||||
# Upload a 2MB chunk
|
||||
head -c 2097152 /dev/urandom > test-chunk.bin
|
||||
|
||||
curl -X POST "https://your-app.apps.cf.example.com/api/cf/upload/chunk" \
|
||||
-F "uploadSessionId=$SESSION_ID" \
|
||||
-F "fileType=jarFile" \
|
||||
-F "chunkIndex=0" \
|
||||
-F "totalChunks=1" \
|
||||
-F "fileName=test.jar" \
|
||||
-F "chunk=@test-chunk.bin"
|
||||
```
|
||||
|
||||
If this succeeds, the fix is working!
|
||||
|
||||
## Recommended Tanzu Settings
|
||||
|
||||
For production deployments handling large files:
|
||||
|
||||
```yaml
|
||||
applications:
|
||||
- name: cf-deployer
|
||||
memory: 2G # Total memory
|
||||
disk_quota: 2G # Disk for temp files
|
||||
instances: 2 # For high availability
|
||||
health-check-type: http
|
||||
health-check-http-endpoint: /actuator/health
|
||||
env:
|
||||
JAVA_TOOL_OPTIONS: "-XX:MaxDirectMemorySize=512m -XX:+UseG1GC -XX:MaxGCPauseMillis=200"
|
||||
JBP_CONFIG_OPEN_JDK_JRE: '{ jre: { version: 17.+ }, memory_calculator: { memory_sizes: { direct: 512m, metaspace: 128m, reserved: 256m } } }'
|
||||
```
|
||||
|
||||
This gives you:
|
||||
- 512MB direct memory (plenty for chunked uploads)
|
||||
- G1 garbage collector (better for large objects)
|
||||
- 2GB total memory (Java heap + direct + metaspace + overhead)
|
||||
- Health check endpoint for monitoring
|
||||
164
README.md
164
README.md
@@ -28,7 +28,16 @@ The application will start on `http://localhost:8080`
|
||||
|
||||
## API Endpoints
|
||||
|
||||
### 1. Deploy Application to Cloud Foundry
|
||||
### Deployment Options
|
||||
|
||||
This service provides two deployment methods:
|
||||
|
||||
1. **Traditional Upload** (`/api/cf/deploy`) - Single request deployment (may hit nginx size/timeout limits)
|
||||
2. **Chunked Upload** (`/api/cf/upload/*`) - Chunked upload with async deployment (recommended for production)
|
||||
|
||||
---
|
||||
|
||||
### Option 1: Traditional Deploy (Simple, may timeout with large files)
|
||||
|
||||
**Endpoint:** `POST /api/cf/deploy`
|
||||
|
||||
@@ -75,6 +84,154 @@ curl -X POST http://localhost:8080/api/cf/deploy \
|
||||
|
||||
---
|
||||
|
||||
### Option 2: Chunked Upload with Async Deployment (Recommended for Production)
|
||||
|
||||
**Why use chunked upload?**
|
||||
- Bypasses nginx `client_max_body_size` restrictions
|
||||
- Avoids nginx timeout issues during deployment
|
||||
- Better for large JAR files (>10MB)
|
||||
- More resilient to network interruptions
|
||||
|
||||
#### Quick Start: Using the Deployment Script
|
||||
|
||||
The easiest way to deploy using chunked upload is with the provided bash script:
|
||||
|
||||
1. **Configure the script:**
|
||||
```bash
|
||||
# Edit deploy-chunked.sh and set your values:
|
||||
API_BASE="https://your-cf-deployer.example.com/api/cf"
|
||||
JAR_FILE="./your-app.jar"
|
||||
MANIFEST_FILE="./manifest.yml"
|
||||
CF_API_ENDPOINT="https://api.cf.example.com"
|
||||
CF_USERNAME="your-username"
|
||||
CF_PASSWORD="your-password"
|
||||
CF_ORGANIZATION="your-org"
|
||||
CF_SPACE="your-space"
|
||||
CF_APP_NAME="your-app"
|
||||
```
|
||||
|
||||
2. **Make it executable and run:**
|
||||
```bash
|
||||
chmod +x deploy-chunked.sh
|
||||
./deploy-chunked.sh
|
||||
```
|
||||
|
||||
The script handles:
|
||||
- File chunking (1MB chunks by default)
|
||||
- Progress tracking
|
||||
- Async deployment
|
||||
- Status polling
|
||||
- Error handling
|
||||
|
||||
#### Manual cURL Commands (For Custom Integration)
|
||||
|
||||
**Step 1: Initialize Upload Session**
|
||||
```bash
|
||||
SESSION_ID=$(curl -s -X POST "http://localhost:8080/api/cf/upload/init" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"apiEndpoint": "https://api.cf.example.com",
|
||||
"username": "your-username",
|
||||
"password": "your-password",
|
||||
"organization": "your-org",
|
||||
"space": "your-space",
|
||||
"appName": "my-app",
|
||||
"skipSslValidation": false
|
||||
}' | grep -o '"uploadSessionId":"[^"]*' | cut -d'"' -f4)
|
||||
|
||||
echo "Session ID: $SESSION_ID"
|
||||
```
|
||||
|
||||
**Step 2: Upload JAR File in Chunks**
|
||||
```bash
|
||||
# Split your JAR into 1MB chunks and upload each
|
||||
CHUNK_SIZE=1048576 # 1MB
|
||||
FILE="./app.jar"
|
||||
TOTAL_CHUNKS=$(( ($(stat -f%z "$FILE" 2>/dev/null || stat -c%s "$FILE") + CHUNK_SIZE - 1) / CHUNK_SIZE ))
|
||||
|
||||
# Split file and upload each chunk
|
||||
split -b $CHUNK_SIZE "$FILE" chunk_
|
||||
CHUNK_INDEX=0
|
||||
|
||||
for chunk_file in chunk_*; do
|
||||
curl -X POST "http://localhost:8080/api/cf/upload/chunk" \
|
||||
-F "uploadSessionId=$SESSION_ID" \
|
||||
-F "fileType=jarFile" \
|
||||
-F "chunkIndex=$CHUNK_INDEX" \
|
||||
-F "totalChunks=$TOTAL_CHUNKS" \
|
||||
-F "fileName=app.jar" \
|
||||
-F "chunk=@$chunk_file"
|
||||
|
||||
CHUNK_INDEX=$((CHUNK_INDEX + 1))
|
||||
done
|
||||
|
||||
rm chunk_* # Cleanup
|
||||
```
|
||||
|
||||
**Step 3: Upload Manifest**
|
||||
```bash
|
||||
curl -X POST "http://localhost:8080/api/cf/upload/chunk" \
|
||||
-F "uploadSessionId=$SESSION_ID" \
|
||||
-F "fileType=manifest" \
|
||||
-F "chunkIndex=0" \
|
||||
-F "totalChunks=1" \
|
||||
-F "fileName=manifest.yml" \
|
||||
-F "chunk=@./manifest.yml"
|
||||
```
|
||||
|
||||
**Step 4: Start Async Deployment**
|
||||
```bash
|
||||
curl -X POST "http://localhost:8080/api/cf/upload/finalize?uploadSessionId=$SESSION_ID&async=true"
|
||||
|
||||
# Response: {"uploadSessionId":"...","status":"IN_PROGRESS","message":"Deployment started...","progress":0}
|
||||
```
|
||||
|
||||
**Step 5: Poll Deployment Status**
|
||||
```bash
|
||||
# Check deployment status every 5 seconds
|
||||
while true; do
|
||||
STATUS=$(curl -s "http://localhost:8080/api/cf/deployment/status/$SESSION_ID")
|
||||
echo "$STATUS"
|
||||
|
||||
# Check if completed or failed
|
||||
if echo "$STATUS" | grep -q '"status":"COMPLETED"'; then
|
||||
echo "Deployment successful!"
|
||||
break
|
||||
elif echo "$STATUS" | grep -q '"status":"FAILED"'; then
|
||||
echo "Deployment failed!"
|
||||
break
|
||||
fi
|
||||
|
||||
sleep 5
|
||||
done
|
||||
```
|
||||
|
||||
**For more details, see:**
|
||||
- `deploy-chunked.sh` - Direct nginx deployment with cert/headers
|
||||
- `deploy-chunked-simple.sh` - Java proxy deployment with Base64 encoding
|
||||
- **`DEPLOYMENT_SCRIPTS.md`** - **Comprehensive guide for both deployment scripts** ⭐
|
||||
- `CHUNKED_UPLOAD_GUIDE.md` - Detailed API documentation
|
||||
- `TIMEOUT_SOLUTION.md` - Architecture and design details
|
||||
- `CHUNK_SIZE_GUIDE.md` - Chunk size recommendations
|
||||
- `MEMORY_FIX.md` - JVM memory configuration for Tanzu
|
||||
|
||||
### Two Deployment Scripts: Which One to Use?
|
||||
|
||||
We provide **two deployment scripts** for different network configurations:
|
||||
|
||||
| Script | Use When | Features |
|
||||
|--------|----------|----------|
|
||||
| **deploy-chunked.sh** | Direct access to nginx | Multipart uploads, cert/key support, custom headers |
|
||||
| **deploy-chunked-simple.sh** | Through Java proxy | Base64 encoded uploads, proxy adds cert/headers |
|
||||
|
||||
**📖 See [DEPLOYMENT_SCRIPTS.md](DEPLOYMENT_SCRIPTS.md) for detailed comparison, troubleshooting, and technical explanations.**
|
||||
|
||||
**Quick Decision Guide:**
|
||||
- ✅ Use `deploy-chunked.sh` if you have direct nginx access and need to provide certificates/headers
|
||||
- ✅ Use `deploy-chunked-simple.sh` if you go through a Java proxy that adds authentication automatically
|
||||
|
||||
---
|
||||
|
||||
### 2. List Applications
|
||||
|
||||
**Endpoint:** `POST /api/cf/apps`
|
||||
@@ -261,15 +418,18 @@ cf.cli.path=
|
||||
## Features
|
||||
|
||||
- **Application Deployment**: Deploy JAR files to Cloud Foundry with manifest support
|
||||
- **Chunked Upload Support**: Upload large files in chunks to bypass nginx size restrictions (NEW)
|
||||
- **Async Deployment**: Non-blocking deployment with status polling to avoid timeout issues (NEW)
|
||||
- **Application Management**: List apps, view details, and access logs
|
||||
- **Route Management**: List all routes in your CF space
|
||||
- **Automatic CF CLI Management**: Bundled CF CLI binaries for Linux, macOS, and Windows
|
||||
- **Secure Password Handling**: Passwords are masked in all log output
|
||||
- **Comprehensive Logging**: Detailed DEBUG-level logging for troubleshooting deployments
|
||||
- **Configurable Timeouts**: Adjustable timeout for long-running deployments (default: 600s)
|
||||
- **Large File Support**: Multipart file upload support up to 500MB
|
||||
- **Large File Support**: Chunked uploads support files of any size
|
||||
- **Automatic Cleanup**: Temporary files are automatically cleaned up after operations
|
||||
- **Error Handling**: Comprehensive exception handling with detailed error messages
|
||||
- **Production Ready**: Memory-optimized for Tanzu deployments with low-memory instances
|
||||
|
||||
## Error Handling
|
||||
|
||||
|
||||
281
TIMEOUT_SOLUTION.md
Normal file
281
TIMEOUT_SOLUTION.md
Normal file
@@ -0,0 +1,281 @@
|
||||
# Nginx Timeout Solution
|
||||
|
||||
## The Problem
|
||||
|
||||
### Chunking Solves Upload Timeouts ✅
|
||||
- Each chunk upload completes in seconds
|
||||
- Well under nginx's 30-second timeout
|
||||
- **No problem here!**
|
||||
|
||||
### But Deployment Still Times Out ❌
|
||||
The `/upload/finalize` endpoint can take 3-5+ minutes because:
|
||||
1. CF login
|
||||
2. CF push (staging, building, deploying)
|
||||
3. CF logout
|
||||
|
||||
This **WILL** hit nginx's 30-second timeout!
|
||||
|
||||
## The Solution: Async Deployment
|
||||
|
||||
Instead of waiting for deployment to complete, we return immediately and let the client poll for status.
|
||||
|
||||
### Flow Comparison
|
||||
|
||||
**Before (Times Out):**
|
||||
```
|
||||
Client → finalize → [waits 5 minutes] → ⏱️ NGINX TIMEOUT after 30s
|
||||
```
|
||||
|
||||
**After (Works):**
|
||||
```
|
||||
Client → finalize → ✅ Returns immediately (202 Accepted)
|
||||
Client → poll status every 5s → IN_PROGRESS
|
||||
Client → poll status → IN_PROGRESS
|
||||
Client → poll status → COMPLETED ✅
|
||||
```
|
||||
|
||||
## Updated API
|
||||
|
||||
### 1. Initialize Upload (unchanged)
|
||||
```bash
|
||||
POST /api/cf/upload/init
|
||||
```
|
||||
|
||||
### 2. Upload Chunks (unchanged)
|
||||
```bash
|
||||
POST /api/cf/upload/chunk
|
||||
```
|
||||
|
||||
### 3. Finalize Upload (NEW: async by default)
|
||||
```bash
|
||||
POST /api/cf/upload/finalize?uploadSessionId={sessionId}&async=true
|
||||
```
|
||||
|
||||
**Response (202 Accepted):**
|
||||
```json
|
||||
{
|
||||
"uploadSessionId": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"status": "IN_PROGRESS",
|
||||
"message": "Deployment started. Use /deployment/status endpoint to check progress.",
|
||||
"progress": 0
|
||||
}
|
||||
```
|
||||
|
||||
### 4. Poll Deployment Status (NEW)
|
||||
```bash
|
||||
GET /api/cf/deployment/status/{uploadSessionId}
|
||||
```
|
||||
|
||||
**Response while deploying:**
|
||||
```json
|
||||
{
|
||||
"uploadSessionId": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"status": "IN_PROGRESS",
|
||||
"message": "Logging into Cloud Foundry...",
|
||||
"progress": 10
|
||||
}
|
||||
```
|
||||
|
||||
**Response when complete:**
|
||||
```json
|
||||
{
|
||||
"uploadSessionId": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"status": "COMPLETED",
|
||||
"message": "Deployment completed successfully",
|
||||
"output": "[full CF CLI output]",
|
||||
"progress": 100
|
||||
}
|
||||
```
|
||||
|
||||
**Response if failed:**
|
||||
```json
|
||||
{
|
||||
"uploadSessionId": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"status": "FAILED",
|
||||
"message": "Deployment failed: ...",
|
||||
"error": "[error details]",
|
||||
"progress": 0
|
||||
}
|
||||
```
|
||||
|
||||
## Updated Bash Script
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
|
||||
API_BASE="http://your-app.example.com/api/cf"
|
||||
JAR_FILE="hf.jar"
|
||||
MANIFEST_FILE="manifest.yml"
|
||||
CHUNK_SIZE=1048576 # 1MB
|
||||
|
||||
CF_CONFIG='{
|
||||
"apiEndpoint": "https://api.cf.example.com",
|
||||
"username": "your-username",
|
||||
"password": "your-password",
|
||||
"organization": "your-org",
|
||||
"space": "your-space",
|
||||
"appName": "your-app",
|
||||
"skipSslValidation": false
|
||||
}'
|
||||
|
||||
echo "=== Step 1: Initialize Upload Session ==="
|
||||
INIT_RESPONSE=$(curl -s -X POST "$API_BASE/upload/init" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "$CF_CONFIG")
|
||||
|
||||
SESSION_ID=$(echo $INIT_RESPONSE | grep -o '"uploadSessionId":"[^"]*' | cut -d'"' -f4)
|
||||
echo "Session created: $SESSION_ID"
|
||||
|
||||
# Function to upload file in chunks
|
||||
upload_file_in_chunks() {
|
||||
local file_path=$1
|
||||
local file_type=$2
|
||||
local file_name=$(basename "$file_path")
|
||||
local file_size=$(stat -f%z "$file_path" 2>/dev/null || stat -c%s "$file_path")
|
||||
local total_chunks=$(( ($file_size + $CHUNK_SIZE - 1) / $CHUNK_SIZE ))
|
||||
|
||||
echo ""
|
||||
echo "=== Uploading $file_type: $file_name ($total_chunks chunks) ==="
|
||||
|
||||
local temp_dir=$(mktemp -d)
|
||||
split -b $CHUNK_SIZE "$file_path" "$temp_dir/chunk_"
|
||||
|
||||
local chunk_index=0
|
||||
for chunk_file in "$temp_dir"/chunk_*; do
|
||||
printf "Uploading chunk %3d/%3d... " "$((chunk_index + 1))" "$total_chunks"
|
||||
|
||||
RESPONSE=$(curl -s -X POST "$API_BASE/upload/chunk" \
|
||||
-F "uploadSessionId=$SESSION_ID" \
|
||||
-F "fileType=$file_type" \
|
||||
-F "chunkIndex=$chunk_index" \
|
||||
-F "totalChunks=$total_chunks" \
|
||||
-F "fileName=$file_name" \
|
||||
-F "chunk=@$chunk_file")
|
||||
|
||||
SUCCESS=$(echo $RESPONSE | grep -o '"success":[^,]*' | cut -d':' -f2)
|
||||
|
||||
if [ "$SUCCESS" != "true" ]; then
|
||||
echo "FAILED"
|
||||
echo "$RESPONSE"
|
||||
rm -rf "$temp_dir"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "OK"
|
||||
chunk_index=$((chunk_index + 1))
|
||||
done
|
||||
|
||||
rm -rf "$temp_dir"
|
||||
echo "$file_type upload completed"
|
||||
}
|
||||
|
||||
# Step 2: Upload JAR file
|
||||
upload_file_in_chunks "$JAR_FILE" "jarFile"
|
||||
|
||||
# Step 3: Upload manifest file
|
||||
upload_file_in_chunks "$MANIFEST_FILE" "manifest"
|
||||
|
||||
# Step 4: Start async deployment
|
||||
echo ""
|
||||
echo "=== Step 4: Starting deployment (async) ==="
|
||||
FINALIZE_RESPONSE=$(curl -s -X POST "$API_BASE/upload/finalize?uploadSessionId=$SESSION_ID&async=true")
|
||||
|
||||
STATUS=$(echo $FINALIZE_RESPONSE | grep -o '"status":"[^"]*' | cut -d'"' -f4)
|
||||
|
||||
if [ "$STATUS" != "IN_PROGRESS" ]; then
|
||||
echo "Failed to start deployment:"
|
||||
echo "$FINALIZE_RESPONSE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Deployment started. Polling for status..."
|
||||
|
||||
# Step 5: Poll deployment status
|
||||
POLL_INTERVAL=5 # seconds
|
||||
MAX_WAIT=600 # 10 minutes max
|
||||
|
||||
elapsed=0
|
||||
while [ $elapsed -lt $MAX_WAIT ]; do
|
||||
sleep $POLL_INTERVAL
|
||||
elapsed=$((elapsed + POLL_INTERVAL))
|
||||
|
||||
STATUS_RESPONSE=$(curl -s "$API_BASE/deployment/status/$SESSION_ID")
|
||||
CURRENT_STATUS=$(echo $STATUS_RESPONSE | grep -o '"status":"[^"]*' | cut -d'"' -f4)
|
||||
MESSAGE=$(echo $STATUS_RESPONSE | grep -o '"message":"[^"]*' | cut -d'"' -f4)
|
||||
PROGRESS=$(echo $STATUS_RESPONSE | grep -o '"progress":[0-9]*' | cut -d':' -f2)
|
||||
|
||||
printf "\r[%3ds] Status: %-15s Progress: %3s%% - %s" \
|
||||
"$elapsed" "$CURRENT_STATUS" "${PROGRESS:-0}" "$MESSAGE"
|
||||
|
||||
if [ "$CURRENT_STATUS" = "COMPLETED" ]; then
|
||||
echo ""
|
||||
echo ""
|
||||
echo "=== Deployment successful! ==="
|
||||
echo "$STATUS_RESPONSE" | jq '.' 2>/dev/null || echo "$STATUS_RESPONSE"
|
||||
exit 0
|
||||
elif [ "$CURRENT_STATUS" = "FAILED" ]; then
|
||||
echo ""
|
||||
echo ""
|
||||
echo "=== Deployment failed ==="
|
||||
echo "$STATUS_RESPONSE" | jq '.' 2>/dev/null || echo "$STATUS_RESPONSE"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "=== Deployment timeout after ${MAX_WAIT}s ==="
|
||||
echo "Check status manually: curl $API_BASE/deployment/status/$SESSION_ID"
|
||||
exit 1
|
||||
```
|
||||
|
||||
## Status Values
|
||||
|
||||
| Status | Description |
|
||||
|--------|-------------|
|
||||
| `PENDING` | Upload session created but deployment not started |
|
||||
| `IN_PROGRESS` | Deployment is currently running |
|
||||
| `COMPLETED` | Deployment finished successfully |
|
||||
| `FAILED` | Deployment failed with errors |
|
||||
|
||||
## Nginx Configuration
|
||||
|
||||
With async deployment, nginx timeout is **not an issue**:
|
||||
|
||||
```nginx
|
||||
server {
|
||||
listen 80;
|
||||
server_name your-app.example.com;
|
||||
|
||||
# Each chunk upload completes quickly
|
||||
client_max_body_size 10m;
|
||||
|
||||
# Standard timeouts work fine now
|
||||
proxy_read_timeout 60s; # Chunks complete in <5s
|
||||
proxy_connect_timeout 10s;
|
||||
proxy_send_timeout 60s;
|
||||
|
||||
location /api/cf/ {
|
||||
proxy_pass http://cf-deployer-backend:8080;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## For Backwards Compatibility
|
||||
|
||||
If you want synchronous deployment (will timeout on nginx!):
|
||||
|
||||
```bash
|
||||
# Synchronous (old way - may timeout)
|
||||
curl -X POST "$API_BASE/upload/finalize?uploadSessionId=$SESSION_ID&async=false"
|
||||
```
|
||||
|
||||
**Default is async=true** to avoid timeout issues.
|
||||
|
||||
## Summary
|
||||
|
||||
✅ **Chunk uploads**: Complete in seconds, no timeout
|
||||
✅ **Finalize endpoint**: Returns immediately (async), no timeout
|
||||
✅ **Status polling**: Each poll completes in milliseconds, no timeout
|
||||
✅ **Total solution**: Works with standard 30-second nginx timeout!
|
||||
354
deploy-chunked-simple.sh
Normal file
354
deploy-chunked-simple.sh
Normal file
@@ -0,0 +1,354 @@
|
||||
#!/bin/bash
|
||||
|
||||
#############################################################################
|
||||
# CF Deployer - Simple Chunked Upload Deployment Script (Through Java proxy)
|
||||
#
|
||||
# This script deploys a Java application to Cloud Foundry using chunked
|
||||
# uploads to bypass nginx size restrictions and async deployment to avoid
|
||||
# timeout issues.
|
||||
#
|
||||
# USE THIS SCRIPT WHEN:
|
||||
# - You deploy through a Java proxy that adds certificates/headers
|
||||
# - The proxy reads request body as @RequestBody String
|
||||
# - You need to send chunks as Base64-encoded text
|
||||
#
|
||||
# For direct nginx deployments with cert/key, use: deploy-chunked.sh
|
||||
# For detailed documentation, see: DEPLOYMENT_SCRIPTS.md
|
||||
#
|
||||
# TECHNICAL NOTES:
|
||||
# - Chunks are Base64-encoded to prevent corruption in the Java proxy
|
||||
# - Query parameters are sent in URL (not form fields)
|
||||
# - Uses Spring Boot endpoint: POST /upload/chunk (Content-Type: text/plain)
|
||||
#
|
||||
# Usage:
|
||||
# ./deploy-chunked-simple.sh
|
||||
#
|
||||
# Configuration:
|
||||
# Edit the variables below to match your environment
|
||||
#############################################################################
|
||||
|
||||
set -e # Exit on error
|
||||
|
||||
#############################################################################
|
||||
# CONFIGURATION - Update these values for your deployment
|
||||
#############################################################################
|
||||
|
||||
# API endpoint
|
||||
API_BASE="http://localhost:8080/api/cf"
|
||||
|
||||
# Files to deploy
|
||||
JAR_FILE="./app.jar"
|
||||
MANIFEST_FILE="./manifest.yml"
|
||||
|
||||
# Chunk size (bytes)
|
||||
# Recommended: 1MB (1048576) for Tanzu with memory constraints
|
||||
# Options: 512KB (524288), 1MB (1048576), 2MB (2097152), 5MB (5242880)
|
||||
CHUNK_SIZE=1048576 # 1MB
|
||||
|
||||
# Cloud Foundry configuration
|
||||
CF_API_ENDPOINT="https://api.cf.example.com"
|
||||
CF_USERNAME="your-username"
|
||||
CF_PASSWORD="your-password"
|
||||
CF_ORGANIZATION="your-org"
|
||||
CF_SPACE="your-space"
|
||||
CF_APP_NAME="your-app"
|
||||
CF_SKIP_SSL="false" # Use "true" for self-signed certificates
|
||||
|
||||
# Polling configuration
|
||||
POLL_INTERVAL=5 # seconds between status checks
|
||||
MAX_WAIT=600 # maximum wait time in seconds (10 minutes)
|
||||
|
||||
# Debug mode (set to "true" to see curl commands and responses)
|
||||
DEBUG_MODE="false"
|
||||
|
||||
#############################################################################
|
||||
# SCRIPT - Do not modify below this line
|
||||
#############################################################################
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Helper functions
|
||||
log_info() {
|
||||
echo -e "${BLUE}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
log_success() {
|
||||
echo -e "${GREEN}[SUCCESS]${NC} $1"
|
||||
}
|
||||
|
||||
log_error() {
|
||||
echo -e "${RED}[ERROR]${NC} $1"
|
||||
}
|
||||
|
||||
log_warning() {
|
||||
echo -e "${YELLOW}[WARNING]${NC} $1"
|
||||
}
|
||||
|
||||
# Check if files exist
|
||||
if [ ! -f "$JAR_FILE" ]; then
|
||||
log_error "JAR file not found: $JAR_FILE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f "$MANIFEST_FILE" ]; then
|
||||
log_error "Manifest file not found: $MANIFEST_FILE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if curl is available
|
||||
if ! command -v curl &> /dev/null; then
|
||||
log_error "curl is not installed. Please install curl to use this script."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Build CF configuration JSON
|
||||
CF_CONFIG=$(cat <<EOF
|
||||
{
|
||||
"apiEndpoint": "$CF_API_ENDPOINT",
|
||||
"username": "$CF_USERNAME",
|
||||
"password": "$CF_PASSWORD",
|
||||
"organization": "$CF_ORGANIZATION",
|
||||
"space": "$CF_SPACE",
|
||||
"appName": "$CF_APP_NAME",
|
||||
"skipSslValidation": $CF_SKIP_SSL
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
log_info "Starting deployment of $CF_APP_NAME to Cloud Foundry"
|
||||
log_info "JAR: $JAR_FILE"
|
||||
log_info "Manifest: $MANIFEST_FILE"
|
||||
log_info "Chunk size: $CHUNK_SIZE bytes ($(($CHUNK_SIZE / 1024))KB)"
|
||||
echo ""
|
||||
|
||||
#############################################################################
|
||||
# STEP 1: Initialize Upload Session
|
||||
#############################################################################
|
||||
log_info "Step 1/5: Initializing upload session..."
|
||||
|
||||
# Debug mode output
|
||||
if [ "$DEBUG_MODE" = "true" ]; then
|
||||
echo "DEBUG: API_BASE = $API_BASE"
|
||||
echo "DEBUG: Request JSON:"
|
||||
echo "$CF_CONFIG"
|
||||
fi
|
||||
|
||||
# Write JSON to temporary file to avoid quote escaping issues
|
||||
TEMP_JSON=$(mktemp)
|
||||
echo "$CF_CONFIG" > "$TEMP_JSON"
|
||||
|
||||
# Execute curl command
|
||||
if [ "$DEBUG_MODE" = "true" ]; then
|
||||
INIT_RESPONSE=$(curl -v -X POST "$API_BASE/upload/init" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d @"$TEMP_JSON")
|
||||
else
|
||||
INIT_RESPONSE=$(curl -s -X POST "$API_BASE/upload/init" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d @"$TEMP_JSON")
|
||||
fi
|
||||
|
||||
# Capture curl's exit status before any other command overwrites $?
CURL_EXIT_CODE=$?

# Clean up temporary file
rm -f "$TEMP_JSON"
|
||||
if [ "$DEBUG_MODE" = "true" ]; then
|
||||
echo "DEBUG: Curl exit code: $CURL_EXIT_CODE"
|
||||
echo "DEBUG: Response:"
|
||||
echo "$INIT_RESPONSE"
|
||||
fi
|
||||
|
||||
if [ $CURL_EXIT_CODE -ne 0 ]; then
|
||||
log_error "Failed to initialize upload session (curl exit code: $CURL_EXIT_CODE)"
|
||||
echo "$INIT_RESPONSE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Debug output
|
||||
if [ -z "$INIT_RESPONSE" ]; then
|
||||
log_error "Empty response from server"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SESSION_ID=$(echo "$INIT_RESPONSE" | grep -o '"uploadSessionId":"[^"]*' | cut -d'"' -f4)
|
||||
|
||||
if [ -z "$SESSION_ID" ]; then
|
||||
log_error "Failed to get session ID from response:"
|
||||
echo "$INIT_RESPONSE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log_success "Upload session created: $SESSION_ID"
|
||||
echo ""
|
||||
|
||||
#############################################################################
|
||||
# FUNCTION: Upload file in chunks
|
||||
#############################################################################
|
||||
upload_file_in_chunks() {
|
||||
local file_path=$1
|
||||
local file_type=$2
|
||||
local file_name=$(basename "$file_path")
|
||||
|
||||
# Get file size (cross-platform)
|
||||
if [[ "$OSTYPE" == "darwin"* ]]; then
|
||||
local file_size=$(stat -f%z "$file_path")
|
||||
else
|
||||
local file_size=$(stat -c%s "$file_path")
|
||||
fi
|
||||
|
||||
local total_chunks=$(( ($file_size + $CHUNK_SIZE - 1) / $CHUNK_SIZE ))
|
||||
local file_size_mb=$(awk "BEGIN {printf \"%.2f\", $file_size / 1048576}")
|
||||
|
||||
log_info "Uploading $file_type: $file_name (${file_size_mb}MB, $total_chunks chunks)"
|
||||
|
||||
# Create temporary directory for chunks
|
||||
local temp_dir=$(mktemp -d)
|
||||
|
||||
# Split file into chunks
|
||||
split -b $CHUNK_SIZE "$file_path" "$temp_dir/chunk_"
|
||||
|
||||
local chunk_index=0
|
||||
for chunk_file in "$temp_dir"/chunk_*; do
|
||||
printf " Chunk %3d/%3d... " "$((chunk_index + 1))" "$total_chunks"
|
||||
|
||||
if [ "$DEBUG_MODE" = "true" ]; then
|
||||
echo ""
|
||||
echo "DEBUG: SESSION_ID='$SESSION_ID'"
|
||||
echo "DEBUG: Uploading chunk to: $API_BASE/upload/chunk?uploadSessionId=$SESSION_ID&fileType=$file_type&chunkIndex=$chunk_index&totalChunks=$total_chunks&fileName=$file_name"
|
||||
fi
|
||||
|
||||
# Base64 encode the chunk to a temp file so Java proxy can handle it as text/String
|
||||
# This prevents corruption when proxy reads binary data as String
|
||||
# Use temp file to avoid "Argument list too long" error
|
||||
CHUNK_BASE64_FILE=$(mktemp)
|
||||
base64 < "$chunk_file" > "$CHUNK_BASE64_FILE"
|
||||
|
||||
RESPONSE=$(curl -s -X POST "$API_BASE/upload/chunk?uploadSessionId=$SESSION_ID&fileType=$file_type&chunkIndex=$chunk_index&totalChunks=$total_chunks&fileName=$file_name" \
|
||||
-H "Content-Type: text/plain" \
|
||||
-H "X-Chunk-Encoding: base64" \
|
||||
-d @"$CHUNK_BASE64_FILE")
|
||||
|
||||
# Clean up temp file
|
||||
rm -f "$CHUNK_BASE64_FILE"
|
||||
|
||||
if [ "$DEBUG_MODE" = "true" ]; then
|
||||
echo "DEBUG: Chunk response: $RESPONSE"
|
||||
fi
|
||||
|
||||
SUCCESS=$(echo "$RESPONSE" | grep -o '"success":[^,}]*' | cut -d':' -f2)
|
||||
|
||||
if [ "$SUCCESS" != "true" ]; then
|
||||
echo -e "${RED}FAILED${NC}"
|
||||
log_error "Failed to upload chunk $((chunk_index + 1))/$total_chunks"
|
||||
echo "DEBUG: SESSION_ID='$SESSION_ID'"
|
||||
echo "DEBUG: fileType='$file_type'"
|
||||
echo "DEBUG: chunkIndex='$chunk_index'"
|
||||
echo "DEBUG: totalChunks='$total_chunks'"
|
||||
echo "DEBUG: fileName='$file_name'"
|
||||
echo "$RESPONSE"
|
||||
rm -rf "$temp_dir"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}OK${NC}"
|
||||
chunk_index=$((chunk_index + 1))
|
||||
done
|
||||
|
||||
# Cleanup temporary directory
|
||||
rm -rf "$temp_dir"
|
||||
log_success "$file_type upload completed ($total_chunks chunks)"
|
||||
echo ""
|
||||
}
|
||||
|
||||
#############################################################################
|
||||
# STEP 2: Upload JAR file
|
||||
#############################################################################
|
||||
log_info "Step 2/5: Uploading JAR file..."
|
||||
upload_file_in_chunks "$JAR_FILE" "jarFile"
|
||||
|
||||
#############################################################################
|
||||
# STEP 3: Upload manifest file
|
||||
#############################################################################
|
||||
log_info "Step 3/5: Uploading manifest file..."
|
||||
upload_file_in_chunks "$MANIFEST_FILE" "manifest"
|
||||
|
||||
#############################################################################
|
||||
# STEP 4: Start async deployment
|
||||
#############################################################################
|
||||
log_info "Step 4/5: Starting async deployment..."
|
||||
|
||||
FINALIZE_RESPONSE=$(curl -s -X POST "$API_BASE/upload/finalize?uploadSessionId=$SESSION_ID&async=true" \
|
||||
-H "Content-Length: 0")
|
||||
|
||||
STATUS=$(echo "$FINALIZE_RESPONSE" | grep -o '"status":"[^"]*' | cut -d'"' -f4)
|
||||
|
||||
if [ "$STATUS" != "IN_PROGRESS" ]; then
|
||||
log_error "Failed to start deployment. Status: $STATUS"
|
||||
echo "$FINALIZE_RESPONSE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log_success "Deployment started successfully"
|
||||
log_info "Step 5/5: Polling deployment status (max wait: ${MAX_WAIT}s)..."
|
||||
echo ""
|
||||
|
||||
#############################################################################
|
||||
# STEP 5: Poll deployment status
|
||||
#############################################################################
|
||||
elapsed=0
|
||||
last_message=""
|
||||
|
||||
while [ $elapsed -lt $MAX_WAIT ]; do
|
||||
sleep $POLL_INTERVAL
|
||||
elapsed=$((elapsed + POLL_INTERVAL))
|
||||
|
||||
# Guard the assignment so a transient curl failure doesn't abort the
# script under `set -e`; log a warning and retry on the next poll.
if ! STATUS_RESPONSE=$(curl -s "$API_BASE/deployment/status/$SESSION_ID" 2>/dev/null); then
    log_warning "Failed to fetch status, retrying..."
    continue
fi
|
||||
|
||||
CURRENT_STATUS=$(echo "$STATUS_RESPONSE" | grep -o '"status":"[^"]*' | cut -d'"' -f4)
|
||||
MESSAGE=$(echo "$STATUS_RESPONSE" | grep -o '"message":"[^"]*' | cut -d'"' -f4)
|
||||
PROGRESS=$(echo "$STATUS_RESPONSE" | grep -o '"progress":[0-9]*' | cut -d':' -f2)
|
||||
|
||||
# Only print if message changed to reduce clutter
|
||||
if [ "$MESSAGE" != "$last_message" ]; then
|
||||
printf " [%3ds] Status: %-15s Progress: %3s%% - %s\n" \
|
||||
"$elapsed" "$CURRENT_STATUS" "${PROGRESS:-0}" "$MESSAGE"
|
||||
last_message="$MESSAGE"
|
||||
fi
|
||||
|
||||
# Check if deployment completed
|
||||
if [ "$CURRENT_STATUS" = "COMPLETED" ]; then
|
||||
echo ""
|
||||
log_success "Deployment completed successfully!"
|
||||
echo ""
|
||||
log_info "Deployment details:"
|
||||
echo "$STATUS_RESPONSE" | python3 -m json.tool 2>/dev/null || echo "$STATUS_RESPONSE"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Check if deployment failed
|
||||
if [ "$CURRENT_STATUS" = "FAILED" ]; then
|
||||
echo ""
|
||||
log_error "Deployment failed!"
|
||||
echo ""
|
||||
log_info "Error details:"
|
||||
echo "$STATUS_RESPONSE" | python3 -m json.tool 2>/dev/null || echo "$STATUS_RESPONSE"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
# Timeout reached
|
||||
echo ""
|
||||
log_warning "Deployment timeout reached after ${MAX_WAIT}s"
|
||||
log_info "The deployment may still be running. Check status manually:"
|
||||
echo " curl $API_BASE/deployment/status/$SESSION_ID"
|
||||
exit 1
|
||||
396
deploy-chunked.sh
Normal file
396
deploy-chunked.sh
Normal file
@@ -0,0 +1,396 @@
|
||||
#!/bin/bash

#############################################################################
# CF Deployer - Chunked Upload Deployment Script (Direct to nginx)
#
# Deploys a Java application to Cloud Foundry using chunked uploads to
# bypass nginx request-size restrictions, and async deployment to avoid
# gateway timeout issues.
#
# USE THIS SCRIPT WHEN:
#   - You have direct access to nginx endpoint
#   - You need to provide client certificates and custom headers
#   - You can send multipart/form-data directly
#
# For deployments through a Java proxy, use: deploy-chunked-simple.sh
# For detailed documentation, see: DEPLOYMENT_SCRIPTS.md
#
# Usage:
#   ./deploy-chunked.sh
#
# Configuration:
#   Edit the variables below to match your environment
#############################################################################

set -e  # Exit on error

#############################################################################
# CONFIGURATION - Update these values for your deployment
#############################################################################

# API endpoint
API_BASE="http://localhost:8080/api/cf"

# Files to deploy
JAR_FILE="./app.jar"
MANIFEST_FILE="./manifest.yml"

# Chunk size (bytes)
# Recommended: 1MB (1048576) for Tanzu with memory constraints
# Options: 512KB (524288), 1MB (1048576), 2MB (2097152), 5MB (5242880)
CHUNK_SIZE=1048576  # 1MB

# Cloud Foundry configuration
CF_API_ENDPOINT="https://api.cf.example.com"
CF_USERNAME="your-username"
CF_PASSWORD="your-password"
CF_ORGANIZATION="your-org"
CF_SPACE="your-space"
CF_APP_NAME="your-app"
CF_SKIP_SSL="false"  # Use "true" for self-signed certificates

# Polling configuration
POLL_INTERVAL=5  # seconds between status checks
MAX_WAIT=600     # maximum wait time in seconds (10 minutes)

# Debug mode (set to "true" to see curl commands and responses)
DEBUG_MODE="false"

# Security configuration (optional)
CERT_FILE=""        # Path to client certificate file (leave empty to skip)
KEY_FILE=""         # Path to client private key file (leave empty to skip)
X_FORWARDED_FOR=""  # X-Forwarded-For header value (leave empty to skip)
MY_APIM_KEY=""      # My-APIM-KEY header value (leave empty to skip)
|
||||
|
||||
#############################################################################
# SCRIPT - Do not modify below this line
#############################################################################

# ANSI color codes used by the log helpers below
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'  # No Color

# log_info MESSAGE - print an informational message with a blue tag
log_info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

# log_success MESSAGE - print a success message with a green tag
log_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

# log_error MESSAGE - print an error message with a red tag
log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# log_warning MESSAGE - print a warning message with a yellow tag
log_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}
|
||||
|
||||
#############################################################################
# Pre-flight checks and request assembly
#############################################################################

# Both artifacts must exist before a session is opened
if [ ! -f "$JAR_FILE" ]; then
    log_error "JAR file not found: $JAR_FILE"
    exit 1
fi

if [ ! -f "$MANIFEST_FILE" ]; then
    log_error "Manifest file not found: $MANIFEST_FILE"
    exit 1
fi

# curl is the only external transport tool this script depends on
if ! command -v curl &> /dev/null; then
    log_error "curl is not installed. Please install curl to use this script."
    exit 1
fi

# Optional curl arguments (client certs / extra headers) are collected in an
# array so values containing spaces survive word splitting.
CURL_OPTS=()

if [ -n "$CERT_FILE" ]; then
    if [ ! -f "$CERT_FILE" ]; then
        log_error "Certificate file not found: $CERT_FILE"
        exit 1
    fi
    CURL_OPTS+=("--cert" "$CERT_FILE")
fi

if [ -n "$KEY_FILE" ]; then
    if [ ! -f "$KEY_FILE" ]; then
        log_error "Key file not found: $KEY_FILE"
        exit 1
    fi
    CURL_OPTS+=("--key" "$KEY_FILE")
fi

if [ -n "$X_FORWARDED_FOR" ]; then
    CURL_OPTS+=("-H" "X-Forwarded-For: $X_FORWARDED_FOR")
fi

if [ -n "$MY_APIM_KEY" ]; then
    CURL_OPTS+=("-H" "My-APIM-KEY: $MY_APIM_KEY")
fi

# JSON payload sent to /upload/init to describe the CF target
CF_CONFIG=$(cat <<EOF
{
  "apiEndpoint": "$CF_API_ENDPOINT",
  "username": "$CF_USERNAME",
  "password": "$CF_PASSWORD",
  "organization": "$CF_ORGANIZATION",
  "space": "$CF_SPACE",
  "appName": "$CF_APP_NAME",
  "skipSslValidation": $CF_SKIP_SSL
}
EOF
)

log_info "Starting deployment of $CF_APP_NAME to Cloud Foundry"
log_info "JAR: $JAR_FILE"
log_info "Manifest: $MANIFEST_FILE"
log_info "Chunk size: $CHUNK_SIZE bytes ($(($CHUNK_SIZE / 1024))KB)"
echo ""
|
||||
|
||||
#############################################################################
# STEP 1: Initialize Upload Session
#
# POSTs the CF configuration to /upload/init and extracts the session id
# that every subsequent chunk/finalize/status call must carry.
#############################################################################
log_info "Step 1/5: Initializing upload session..."

if [ "$DEBUG_MODE" = "true" ]; then
    echo "DEBUG: API_BASE = $API_BASE"
    echo "DEBUG: CURL_OPTS = ${CURL_OPTS[@]}"
    echo "DEBUG: Request JSON:"
    echo "$CF_CONFIG"
fi

# Write JSON to a temporary file to avoid quote escaping issues
TEMP_JSON=$(mktemp)
echo "$CF_CONFIG" > "$TEMP_JSON"

# Verbose curl in debug mode, silent otherwise
if [ "$DEBUG_MODE" = "true" ]; then
    CURL_VERBOSITY="-v"
else
    CURL_VERBOSITY="-s"
fi

# BUGFIX: the exit status was previously read *after* `rm -f "$TEMP_JSON"`,
# so it always reflected rm (success), and under `set -e` a failed curl
# aborted the script before any diagnostics. Capture the status on the
# curl call itself via `|| CURL_EXIT_CODE=$?`.
CURL_EXIT_CODE=0
if [ ${#CURL_OPTS[@]} -gt 0 ]; then
    INIT_RESPONSE=$(curl $CURL_VERBOSITY -X POST "$API_BASE/upload/init" \
        -H "Content-Type: application/json" \
        "${CURL_OPTS[@]}" \
        -d @"$TEMP_JSON") || CURL_EXIT_CODE=$?
else
    INIT_RESPONSE=$(curl $CURL_VERBOSITY -X POST "$API_BASE/upload/init" \
        -H "Content-Type: application/json" \
        -d @"$TEMP_JSON") || CURL_EXIT_CODE=$?
fi

# Clean up the temporary request file
rm -f "$TEMP_JSON"

if [ "$DEBUG_MODE" = "true" ]; then
    echo "DEBUG: Curl exit code: $CURL_EXIT_CODE"
    echo "DEBUG: Response:"
    echo "$INIT_RESPONSE"
fi

if [ $CURL_EXIT_CODE -ne 0 ]; then
    log_error "Failed to initialize upload session (curl exit code: $CURL_EXIT_CODE)"
    echo "$INIT_RESPONSE"
    exit 1
fi

if [ -z "$INIT_RESPONSE" ]; then
    log_error "Empty response from server"
    exit 1
fi

# Crude JSON extraction (avoids a jq dependency); empty result means the
# server rejected the request.
SESSION_ID=$(echo "$INIT_RESPONSE" | grep -o '"uploadSessionId":"[^"]*' | cut -d'"' -f4)

if [ -z "$SESSION_ID" ]; then
    log_error "Failed to get session ID from response:"
    echo "$INIT_RESPONSE"
    exit 1
fi

log_success "Upload session created: $SESSION_ID"
echo ""
|
||||
|
||||
#############################################################################
# FUNCTION: Upload file in chunks
#
# Splits a file into $CHUNK_SIZE pieces and POSTs them sequentially to
# $API_BASE/upload/chunk under the current $SESSION_ID.
#
# Arguments:
#   $1 - path of the file to upload
#   $2 - logical file type reported to the server ("jarFile" or "manifest")
#
# Exits the whole script with status 1 if any chunk fails.
#############################################################################
upload_file_in_chunks() {
    local file_path=$1
    local file_type=$2
    local file_name=$(basename "$file_path")

    # Get file size (stat flags differ between macOS/BSD and GNU coreutils)
    if [[ "$OSTYPE" == "darwin"* ]]; then
        local file_size=$(stat -f%z "$file_path")
    else
        local file_size=$(stat -c%s "$file_path")
    fi

    # Ceiling division: number of chunks needed to cover the file
    local total_chunks=$(( ($file_size + $CHUNK_SIZE - 1) / $CHUNK_SIZE ))
    local file_size_mb=$(awk "BEGIN {printf \"%.2f\", $file_size / 1048576}")

    log_info "Uploading $file_type: $file_name (${file_size_mb}MB, $total_chunks chunks)"

    # Split into chunk files; split's alphabetic suffixes sort in file order
    local temp_dir=$(mktemp -d)
    split -b $CHUNK_SIZE "$file_path" "$temp_dir/chunk_"

    local chunk_index=0
    for chunk_file in "$temp_dir"/chunk_*; do
        printf " Chunk %3d/%3d... " "$((chunk_index + 1))" "$total_chunks"

        # BUGFIX: `|| true` keeps `set -e` from killing the script on a
        # transport-level curl failure before we can clean up $temp_dir and
        # report the error; a failed call leaves RESPONSE empty, which the
        # success check below treats as a failure.
        if [ ${#CURL_OPTS[@]} -gt 0 ]; then
            RESPONSE=$(curl -s -X POST "$API_BASE/upload/chunk" \
                "${CURL_OPTS[@]}" \
                -F "uploadSessionId=$SESSION_ID" \
                -F "fileType=$file_type" \
                -F "chunkIndex=$chunk_index" \
                -F "totalChunks=$total_chunks" \
                -F "fileName=$file_name" \
                -F "chunk=@$chunk_file") || true
        else
            RESPONSE=$(curl -s -X POST "$API_BASE/upload/chunk" \
                -F "uploadSessionId=$SESSION_ID" \
                -F "fileType=$file_type" \
                -F "chunkIndex=$chunk_index" \
                -F "totalChunks=$total_chunks" \
                -F "fileName=$file_name" \
                -F "chunk=@$chunk_file") || true
        fi

        SUCCESS=$(echo "$RESPONSE" | grep -o '"success":[^,}]*' | cut -d':' -f2)

        if [ "$SUCCESS" != "true" ]; then
            echo -e "${RED}FAILED${NC}"
            log_error "Failed to upload chunk $((chunk_index + 1))/$total_chunks"
            echo "$RESPONSE"
            rm -rf "$temp_dir"
            exit 1
        fi

        echo -e "${GREEN}OK${NC}"
        chunk_index=$((chunk_index + 1))
    done

    # Cleanup temporary chunk directory
    rm -rf "$temp_dir"
    log_success "$file_type upload completed ($total_chunks chunks)"
    echo ""
}
|
||||
|
||||
#############################################################################
# STEP 2: Upload JAR file
#############################################################################
log_info "Step 2/5: Uploading JAR file..."
upload_file_in_chunks "$JAR_FILE" "jarFile"

#############################################################################
# STEP 3: Upload manifest file
#############################################################################
log_info "Step 3/5: Uploading manifest file..."
upload_file_in_chunks "$MANIFEST_FILE" "manifest"

#############################################################################
# STEP 4: Start async deployment
#
# Finalizes the upload session; async=true makes the server return
# immediately so we can poll for progress in Step 5.
#############################################################################
log_info "Step 4/5: Starting async deployment..."

# BUGFIX: `|| true` keeps `set -e` from aborting on a failed curl before
# the status check below can print the server's response.
if [ ${#CURL_OPTS[@]} -gt 0 ]; then
    FINALIZE_RESPONSE=$(curl -s -X POST "$API_BASE/upload/finalize?uploadSessionId=$SESSION_ID&async=true" \
        -H "Content-Length: 0" \
        "${CURL_OPTS[@]}") || true
else
    FINALIZE_RESPONSE=$(curl -s -X POST "$API_BASE/upload/finalize?uploadSessionId=$SESSION_ID&async=true" \
        -H "Content-Length: 0") || true
fi

STATUS=$(echo "$FINALIZE_RESPONSE" | grep -o '"status":"[^"]*' | cut -d'"' -f4)

if [ "$STATUS" != "IN_PROGRESS" ]; then
    log_error "Failed to start deployment. Status: $STATUS"
    echo "$FINALIZE_RESPONSE"
    exit 1
fi

log_success "Deployment started successfully"
log_info "Step 5/5: Polling deployment status (max wait: ${MAX_WAIT}s)..."
echo ""
|
||||
|
||||
#############################################################################
# STEP 5: Poll deployment status
#
# Polls /deployment/status/{sessionId} every $POLL_INTERVAL seconds until
# the server reports COMPLETED or FAILED, or $MAX_WAIT seconds elapse.
#############################################################################
elapsed=0
last_message=""

while [ $elapsed -lt $MAX_WAIT ]; do
    sleep $POLL_INTERVAL
    elapsed=$((elapsed + POLL_INTERVAL))

    # BUGFIX: under `set -e` a failed curl assignment used to terminate the
    # script, making the retry branch below unreachable. Capture the exit
    # status explicitly so transient fetch failures are retried.
    CURL_STATUS=0
    if [ ${#CURL_OPTS[@]} -gt 0 ]; then
        STATUS_RESPONSE=$(curl -s "$API_BASE/deployment/status/$SESSION_ID" \
            "${CURL_OPTS[@]}" 2>/dev/null) || CURL_STATUS=$?
    else
        STATUS_RESPONSE=$(curl -s "$API_BASE/deployment/status/$SESSION_ID" 2>/dev/null) || CURL_STATUS=$?
    fi

    if [ $CURL_STATUS -ne 0 ]; then
        log_warning "Failed to fetch status, retrying..."
        continue
    fi

    # Crude JSON field extraction (avoids a jq dependency)
    CURRENT_STATUS=$(echo "$STATUS_RESPONSE" | grep -o '"status":"[^"]*' | cut -d'"' -f4)
    MESSAGE=$(echo "$STATUS_RESPONSE" | grep -o '"message":"[^"]*' | cut -d'"' -f4)
    PROGRESS=$(echo "$STATUS_RESPONSE" | grep -o '"progress":[0-9]*' | cut -d':' -f2)

    # Only print if message changed to reduce clutter
    if [ "$MESSAGE" != "$last_message" ]; then
        printf " [%3ds] Status: %-15s Progress: %3s%% - %s\n" \
            "$elapsed" "$CURRENT_STATUS" "${PROGRESS:-0}" "$MESSAGE"
        last_message="$MESSAGE"
    fi

    # Terminal state: success
    if [ "$CURRENT_STATUS" = "COMPLETED" ]; then
        echo ""
        log_success "Deployment completed successfully!"
        echo ""
        log_info "Deployment details:"
        echo "$STATUS_RESPONSE" | python -m json.tool 2>/dev/null || echo "$STATUS_RESPONSE"
        exit 0
    fi

    # Terminal state: failure
    if [ "$CURRENT_STATUS" = "FAILED" ]; then
        echo ""
        log_error "Deployment failed!"
        echo ""
        log_info "Error details:"
        echo "$STATUS_RESPONSE" | python -m json.tool 2>/dev/null || echo "$STATUS_RESPONSE"
        exit 1
    fi
done

# Timeout reached; the server-side deployment may still be in flight
echo ""
log_warning "Deployment timeout reached after ${MAX_WAIT}s"
log_info "The deployment may still be running. Check status manually:"
echo "  curl $API_BASE/deployment/status/$SESSION_ID"
exit 1
|
||||
7
frontend/.dockerignore
Normal file
7
frontend/.dockerignore
Normal file
@@ -0,0 +1,7 @@
|
||||
node_modules
|
||||
npm-debug.log
|
||||
dist
|
||||
.git
|
||||
.gitignore
|
||||
README.md
|
||||
helm
|
||||
25
frontend/.gitignore
vendored
Normal file
25
frontend/.gitignore
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
# Node
|
||||
node_modules/
|
||||
npm-debug.log
|
||||
yarn-error.log
|
||||
|
||||
# Angular
|
||||
dist/
|
||||
.angular/
|
||||
.ng_build/
|
||||
.ng_pkg_build/
|
||||
|
||||
# IDEs
|
||||
.idea/
|
||||
.vscode/
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
# OS
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# Environment
|
||||
.env
|
||||
.env.local
|
||||
33
frontend/Dockerfile
Normal file
33
frontend/Dockerfile
Normal file
@@ -0,0 +1,33 @@
|
||||
# Stage 1: Build the Angular application
FROM node:20-alpine AS builder

WORKDIR /app

# Install dependencies from the lockfile first so this layer is cached
# whenever only source files change
COPY package*.json ./
RUN npm ci

# Copy the remaining sources and produce a production build
COPY . .
RUN npm run build:prod

# Stage 2: Serve the static bundle with nginx
FROM nginx:alpine

# The nginx config is kept as a template so BACKEND_URL can be injected
# at container startup via envsubst
COPY nginx.conf /etc/nginx/nginx.conf.template

# Static assets produced by the builder stage
COPY --from=builder /app/dist/cf-deployer-ui/browser /usr/share/nginx/html

# Default backend URL (override with `docker run -e BACKEND_URL=...`)
ENV BACKEND_URL=http://localhost:8080

# Substitute the backend URL into the config, then run nginx in the foreground
CMD envsubst '${BACKEND_URL}' < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf && nginx -g 'daemon off;'

EXPOSE 80
|
||||
348
frontend/README.md
Normal file
348
frontend/README.md
Normal file
@@ -0,0 +1,348 @@
|
||||
# Tanzu Deployer UI - Angular Frontend
|
||||
|
||||
A modern, dark-themed Angular 19.1 frontend for deploying applications to Tanzu/Cloud Foundry environments.
|
||||
|
||||
## Features
|
||||
|
||||
- ✅ **Modern Dark Theme**: Sleek GitHub-inspired dark UI with blue/purple gradients
|
||||
- ✅ **Simple Form Interface**: Easy-to-use deployment form with all Tanzu configuration fields
|
||||
- ✅ **Modern File Upload**: Beautiful drag-and-drop style file upload buttons with SVG icons
|
||||
- ✅ **Chunked File Upload**: Handles large JAR files using Base64-encoded chunks (compatible with Java proxy)
|
||||
- ✅ **Real-time Progress**: Visual progress bar showing upload and deployment status
|
||||
- ✅ **Live Logs**: Collapsible terminal-style output window displaying deployment logs
|
||||
- ✅ **Responsive Design**: Works seamlessly on desktop, tablet, and mobile devices
|
||||
- ✅ **Secure**: Password fields are masked, follows Angular security best practices
|
||||
- ✅ **Configurable**: Environment-based configuration for all settings
|
||||
- ✅ **Production Ready**: Includes nginx configuration and Helm chart for K8s deployment
|
||||
|
||||
## Configuration
|
||||
|
||||
All application settings are managed via environment files:
|
||||
|
||||
**`src/environments/environment.ts`** (Development):
|
||||
```typescript
|
||||
export const environment = {
|
||||
production: false,
|
||||
apiBase: '/api/cf', // API endpoint base URL
|
||||
chunkSize: 1048576, // Chunk size in bytes (1MB)
|
||||
enableSslValidation: false, // Show/hide SSL validation checkbox
|
||||
pollInterval: 5000, // Poll interval in milliseconds
|
||||
maxPollAttempts: 120, // Max polling attempts (10 minutes)
|
||||
defaultLogsExpanded: true, // Logs expanded by default
|
||||
showDebugInfo: false // Show debug information
|
||||
};
|
||||
```
|
||||
|
||||
**`src/environments/environment.prod.ts`** (Production):
|
||||
- Same structure as development
|
||||
- Used when building with `--configuration production`
|
||||
|
||||
### Customizing Configuration
|
||||
|
||||
To customize the application behavior:
|
||||
|
||||
1. Edit `src/environments/environment.ts` for development
|
||||
2. Edit `src/environments/environment.prod.ts` for production
|
||||
3. Rebuild the application
|
||||
|
||||
**Common Customizations:**
|
||||
|
||||
- **Enable SSL Validation Checkbox**: Set `enableSslValidation: true`
|
||||
- **Change Chunk Size**: Set `chunkSize: 2097152` (2MB)
|
||||
- **Increase Poll Time**: Set `maxPollAttempts: 240` (20 minutes)
|
||||
- **Change API Endpoint**: Set `apiBase: 'https://api.example.com/cf'`
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Local Development
|
||||
|
||||
1. **Install dependencies:**
|
||||
```bash
|
||||
npm install
|
||||
```
|
||||
|
||||
2. **Start development server:**
|
||||
```bash
|
||||
npm start
|
||||
```
|
||||
|
||||
3. **Open browser:**
|
||||
Navigate to `http://localhost:4200`
|
||||
|
||||
### Build for Production
|
||||
|
||||
```bash
|
||||
npm run build:prod
|
||||
```
|
||||
|
||||
Built files will be in `dist/cf-deployer-ui/browser/`
|
||||
|
||||
## Docker Deployment
|
||||
|
||||
### Build Docker Image
|
||||
|
||||
```bash
|
||||
docker build -t cf-deployer-ui:latest .
|
||||
```
|
||||
|
||||
### Run Docker Container
|
||||
|
||||
```bash
|
||||
docker run -d \
|
||||
-p 8080:80 \
|
||||
-e BACKEND_URL=http://cf-deployer-backend:8080 \
|
||||
--name cf-deployer-ui \
|
||||
cf-deployer-ui:latest
|
||||
```
|
||||
|
||||
**Environment Variables:**
|
||||
- `BACKEND_URL`: URL of the CF Deployer backend API (default: `http://localhost:8080`)
|
||||
|
||||
## Kubernetes Deployment
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Kubernetes cluster
|
||||
- Helm 3.x installed
|
||||
- Docker image pushed to registry
|
||||
|
||||
### Deploy with Helm
|
||||
|
||||
1. **Update values.yaml:**
|
||||
```yaml
|
||||
image:
|
||||
repository: your-registry/cf-deployer-ui
|
||||
tag: "1.0.0"
|
||||
|
||||
backend:
|
||||
url: "http://cf-deployer-backend:8080"
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
hosts:
|
||||
- host: cf-deployer.example.com
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
```
|
||||
|
||||
2. **Install the chart:**
|
||||
```bash
|
||||
helm install cf-deployer-ui ./helm
|
||||
```
|
||||
|
||||
3. **Upgrade the chart:**
|
||||
```bash
|
||||
helm upgrade cf-deployer-ui ./helm
|
||||
```
|
||||
|
||||
4. **Uninstall:**
|
||||
```bash
|
||||
helm uninstall cf-deployer-ui
|
||||
```
|
||||
|
||||
### Helm Configuration Options
|
||||
|
||||
| Parameter | Description | Default |
|
||||
|-----------|-------------|---------|
|
||||
| `replicaCount` | Number of replicas | `2` |
|
||||
| `image.repository` | Docker image repository | `cf-deployer-ui` |
|
||||
| `image.tag` | Docker image tag | `latest` |
|
||||
| `service.type` | Kubernetes service type | `ClusterIP` |
|
||||
| `service.port` | Service port | `80` |
|
||||
| `backend.url` | Backend API URL | `http://cf-deployer-backend:8080` |
|
||||
| `ingress.enabled` | Enable ingress | `false` |
|
||||
| `resources.limits.cpu` | CPU limit | `500m` |
|
||||
| `resources.limits.memory` | Memory limit | `512Mi` |
|
||||
|
||||
## Usage
|
||||
|
||||
### Deployment Flow
|
||||
|
||||
1. **Fill in Cloud Foundry Details:**
|
||||
- API Endpoint (e.g., `https://api.cf.example.com`)
|
||||
- Username and Password
|
||||
- Organization and Space
|
||||
- Application Name
|
||||
- Skip SSL Validation (if needed)
|
||||
|
||||
2. **Select Files:**
|
||||
- JAR File: Your application JAR
|
||||
- Manifest File: Cloud Foundry manifest.yml
|
||||
|
||||
3. **Deploy:**
|
||||
- Click "Deploy to Cloud Foundry"
|
||||
- Watch progress bar and logs
|
||||
- Wait for completion
|
||||
|
||||
### Screenshots
|
||||
|
||||
**Main Form:**
|
||||
- Clean, responsive form with all required fields
|
||||
- File upload with size display
|
||||
- SSL validation checkbox
|
||||
|
||||
**Progress Tracking:**
|
||||
- Visual progress bar (0-100%)
|
||||
- Current step indicator
|
||||
- Status badges (IN_PROGRESS, COMPLETED, FAILED)
|
||||
|
||||
**Logs Output:**
|
||||
- Collapsible terminal-style output
|
||||
- Timestamped log entries
|
||||
- Auto-scroll to latest logs
|
||||
|
||||
## Architecture
|
||||
|
||||
### How It Works
|
||||
|
||||
The frontend mimics the behavior of `deploy-chunked-simple.sh`:
|
||||
|
||||
1. **Initialize Upload Session:**
|
||||
```
|
||||
POST /api/cf/upload/init
|
||||
→ Returns uploadSessionId
|
||||
```
|
||||
|
||||
2. **Upload Files in Chunks:**
|
||||
- Splits files into 1MB chunks
|
||||
- Base64 encodes each chunk (for Java proxy compatibility)
|
||||
- Uploads via:
|
||||
```
|
||||
POST /api/cf/upload/chunk?uploadSessionId=...&fileType=...&chunkIndex=...&totalChunks=...&fileName=...
|
||||
Headers: Content-Type: text/plain, X-Chunk-Encoding: base64
|
||||
Body: Base64 chunk data
|
||||
```
|
||||
|
||||
3. **Finalize Upload:**
|
||||
```
|
||||
POST /api/cf/upload/finalize?uploadSessionId=...&async=true
|
||||
```
|
||||
|
||||
4. **Poll Deployment Status:**
|
||||
```
|
||||
GET /api/cf/deployment/status/{uploadSessionId}
|
||||
(Every 5 seconds until COMPLETED or FAILED)
|
||||
```
|
||||
|
||||
### Why Base64 Encoding?
|
||||
|
||||
The frontend sends chunks as Base64-encoded text because:
|
||||
- It goes through a Java proxy that reads `@RequestBody String`
|
||||
- Binary data gets corrupted when read as String
|
||||
- Base64 ensures safe text transport through the proxy
|
||||
- Backend automatically decodes Base64 back to binary
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
frontend/
|
||||
├── src/
|
||||
│ ├── app/
|
||||
│ │ ├── app.component.ts # Main component with form logic
|
||||
│ │ ├── app.component.html # Template with form UI
|
||||
│ │ ├── app.component.css # Component styles
|
||||
│ │ └── deploy.service.ts # API service
|
||||
│ ├── index.html # Main HTML file
|
||||
│ ├── main.ts # Bootstrap file
|
||||
│ └── styles.css # Global styles
|
||||
├── helm/ # Helm chart
|
||||
│ ├── Chart.yaml
|
||||
│ ├── values.yaml
|
||||
│ └── templates/
|
||||
│ ├── deployment.yaml
|
||||
│ ├── service.yaml
|
||||
│ ├── ingress.yaml
|
||||
│ └── _helpers.tpl
|
||||
├── nginx.conf # nginx configuration
|
||||
├── Dockerfile # Multi-stage Docker build
|
||||
├── angular.json # Angular CLI configuration
|
||||
├── package.json # NPM dependencies
|
||||
└── tsconfig.json # TypeScript configuration
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Node.js 20.x or higher
|
||||
- npm 10.x or higher
|
||||
- Angular CLI 19.x
|
||||
|
||||
### Install Angular CLI
|
||||
|
||||
```bash
|
||||
npm install -g @angular/cli@19
|
||||
```
|
||||
|
||||
### Code Structure
|
||||
|
||||
**app.component.ts:**
|
||||
- Handles form state and validation
|
||||
- Manages file uploads and chunking
|
||||
- Polls deployment status
|
||||
- Updates progress and logs
|
||||
|
||||
**deploy.service.ts:**
|
||||
- Encapsulates all HTTP API calls
|
||||
- Returns RxJS Observables converted to Promises
|
||||
- Handles Base64 encoding headers
|
||||
|
||||
**Styling:**
|
||||
- Responsive grid layout
|
||||
- Mobile-first design
|
||||
- Terminal-style logs with custom scrollbar
|
||||
- Gradient progress bar
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### CORS Errors
|
||||
|
||||
If you see CORS errors in the browser console:
|
||||
|
||||
1. **Development:** Configure proxy in `angular.json`:
|
||||
```json
|
||||
{
|
||||
"serve": {
|
||||
"options": {
|
||||
"proxyConfig": "proxy.conf.json"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Create `proxy.conf.json`:
|
||||
```json
|
||||
{
|
||||
"/api": {
|
||||
"target": "http://localhost:8080",
|
||||
"secure": false
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
2. **Production:** nginx handles proxying (already configured)
|
||||
|
||||
### File Upload Fails
|
||||
|
||||
- Check that backend is running and accessible
|
||||
- Verify `BACKEND_URL` environment variable
|
||||
- Check browser console for error messages
|
||||
- Enable DEBUG_MODE in backend to see detailed logs
|
||||
|
||||
### Deployment Timeout
|
||||
|
||||
- Default timeout is 10 minutes (120 attempts × 5 seconds)
|
||||
- Increase `maxPollAttempts` in `src/environments/environment.ts` if needed
|
||||
- Check backend logs for actual deployment status
|
||||
|
||||
## Browser Support
|
||||
|
||||
- Chrome/Edge (latest)
|
||||
- Firefox (latest)
|
||||
- Safari (latest)
|
||||
- Mobile browsers (iOS Safari, Chrome Android)
|
||||
|
||||
## License
|
||||
|
||||
MIT License - see main project README
|
||||
76
frontend/angular.json
Normal file
76
frontend/angular.json
Normal file
@@ -0,0 +1,76 @@
|
||||
{
|
||||
"$schema": "./node_modules/@angular/cli/lib/config/schema.json",
|
||||
"version": 1,
|
||||
"newProjectRoot": "projects",
|
||||
"projects": {
|
||||
"cf-deployer-ui": {
|
||||
"projectType": "application",
|
||||
"root": "",
|
||||
"sourceRoot": "src",
|
||||
"prefix": "app",
|
||||
"architect": {
|
||||
"build": {
|
||||
"builder": "@angular-devkit/build-angular:application",
|
||||
"options": {
|
||||
"outputPath": "dist/cf-deployer-ui",
|
||||
"index": "src/index.html",
|
||||
"browser": "src/main.ts",
|
||||
"polyfills": [
|
||||
"zone.js"
|
||||
],
|
||||
"tsConfig": "tsconfig.json",
|
||||
"assets": [],
|
||||
"styles": [
|
||||
"src/styles.css"
|
||||
],
|
||||
"scripts": []
|
||||
},
|
||||
"configurations": {
|
||||
"production": {
|
||||
"budgets": [
|
||||
{
|
||||
"type": "initial",
|
||||
"maximumWarning": "500kB",
|
||||
"maximumError": "1MB"
|
||||
},
|
||||
{
|
||||
"type": "anyComponentStyle",
|
||||
"maximumWarning": "2kB",
|
||||
"maximumError": "4kB"
|
||||
}
|
||||
],
|
||||
"outputHashing": "all",
|
||||
"fileReplacements": [
|
||||
{
|
||||
"replace": "src/environments/environment.ts",
|
||||
"with": "src/environments/environment.prod.ts"
|
||||
}
|
||||
]
|
||||
},
|
||||
"development": {
|
||||
"optimization": false,
|
||||
"extractLicenses": false,
|
||||
"sourceMap": true
|
||||
}
|
||||
},
|
||||
"defaultConfiguration": "production"
|
||||
},
|
||||
"serve": {
|
||||
"builder": "@angular-devkit/build-angular:dev-server",
|
||||
"configurations": {
|
||||
"production": {
|
||||
"buildTarget": "cf-deployer-ui:build:production"
|
||||
},
|
||||
"development": {
|
||||
"buildTarget": "cf-deployer-ui:build:development"
|
||||
}
|
||||
},
|
||||
"defaultConfiguration": "development"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"cli": {
|
||||
"analytics": false
|
||||
}
|
||||
}
|
||||
6
frontend/helm/Chart.yaml
Normal file
6
frontend/helm/Chart.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: v2
|
||||
name: cf-deployer-ui
|
||||
description: Cloud Foundry Deployer UI - Angular frontend
|
||||
type: application
|
||||
version: 1.0.0
|
||||
appVersion: "1.0.0"
|
||||
29
frontend/helm/templates/_helpers.tpl
Normal file
29
frontend/helm/templates/_helpers.tpl
Normal file
@@ -0,0 +1,29 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "cf-deployer-ui.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
*/}}
|
||||
{{- define "cf-deployer-ui.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "cf-deployer-ui.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
64
frontend/helm/templates/deployment.yaml
Normal file
64
frontend/helm/templates/deployment.yaml
Normal file
@@ -0,0 +1,64 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "cf-deployer-ui.fullname" . }}
|
||||
labels:
|
||||
app: {{ include "cf-deployer-ui.name" . }}
|
||||
chart: {{ include "cf-deployer-ui.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
{{- if not .Values.autoscaling.enabled }}
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ include "cf-deployer-ui.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ include "cf-deployer-ui.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: {{ .Values.service.targetPort }}
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: BACKEND_URL
|
||||
value: {{ .Values.backend.url | quote }}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
44
frontend/helm/templates/ingress.yaml
Normal file
44
frontend/helm/templates/ingress.yaml
Normal file
@@ -0,0 +1,44 @@
|
||||
{{- if .Values.ingress.enabled -}}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ include "cf-deployer-ui.fullname" . }}
|
||||
labels:
|
||||
app: {{ include "cf-deployer-ui.name" . }}
|
||||
chart: {{ include "cf-deployer-ui.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.ingress.className }}
|
||||
ingressClassName: {{ .Values.ingress.className }}
|
||||
{{- end }}
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
{{- range .Values.ingress.tls }}
|
||||
- hosts:
|
||||
{{- range .hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- range .Values.ingress.hosts }}
|
||||
- host: {{ .host | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- range .paths }}
|
||||
- path: {{ .path }}
|
||||
pathType: {{ .pathType }}
|
||||
backend:
|
||||
service:
|
||||
name: {{ include "cf-deployer-ui.fullname" $ }}
|
||||
port:
|
||||
number: {{ $.Values.service.port }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
19
frontend/helm/templates/service.yaml
Normal file
19
frontend/helm/templates/service.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "cf-deployer-ui.fullname" . }}
|
||||
labels:
|
||||
app: {{ include "cf-deployer-ui.name" . }}
|
||||
chart: {{ include "cf-deployer-ui.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: {{ include "cf-deployer-ui.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
61
frontend/helm/values.yaml
Normal file
61
frontend/helm/values.yaml
Normal file
@@ -0,0 +1,61 @@
|
||||
# Default values for cf-deployer-ui
|
||||
|
||||
replicaCount: 2
|
||||
|
||||
image:
|
||||
repository: cf-deployer-ui
|
||||
pullPolicy: IfNotPresent
|
||||
tag: "latest"
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
serviceAccount:
|
||||
create: false
|
||||
name: ""
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 80
|
||||
targetPort: 80
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
className: ""
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# cert-manager.io/cluster-issuer: letsencrypt-prod
|
||||
hosts:
|
||||
- host: cf-deployer.example.com
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
tls: []
|
||||
# - secretName: cf-deployer-tls
|
||||
# hosts:
|
||||
# - cf-deployer.example.com
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
|
||||
autoscaling:
|
||||
enabled: false
|
||||
minReplicas: 2
|
||||
maxReplicas: 10
|
||||
targetCPUUtilizationPercentage: 80
|
||||
|
||||
# Backend service URL
|
||||
backend:
|
||||
url: "http://cf-deployer-backend:8080"
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
45
frontend/nginx.conf
Normal file
45
frontend/nginx.conf
Normal file
@@ -0,0 +1,45 @@
|
||||
worker_processes auto;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
sendfile on;
|
||||
keepalive_timeout 65;
|
||||
gzip on;
|
||||
gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
server_name localhost;
|
||||
|
||||
root /usr/share/nginx/html;
|
||||
index index.html;
|
||||
|
||||
# Angular app
|
||||
location / {
|
||||
try_files $uri $uri/ /index.html;
|
||||
}
|
||||
|
||||
# Proxy API requests to backend
|
||||
location /api/ {
|
||||
proxy_pass ${BACKEND_URL};
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
# Increase timeout for long-running deployments
|
||||
proxy_connect_timeout 600s;
|
||||
proxy_send_timeout 600s;
|
||||
proxy_read_timeout 600s;
|
||||
|
||||
# Allow large file uploads
|
||||
client_max_body_size 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
29
frontend/package.json
Normal file
29
frontend/package.json
Normal file
@@ -0,0 +1,29 @@
|
||||
{
|
||||
"name": "cf-deployer-ui",
|
||||
"version": "1.0.0",
|
||||
"scripts": {
|
||||
"ng": "ng",
|
||||
"start": "ng serve",
|
||||
"build": "ng build",
|
||||
"build:prod": "ng build --configuration production"
|
||||
},
|
||||
"private": true,
|
||||
"dependencies": {
|
||||
"@angular/animations": "^19.1.0",
|
||||
"@angular/common": "^19.1.0",
|
||||
"@angular/compiler": "^19.1.0",
|
||||
"@angular/core": "^19.1.0",
|
||||
"@angular/forms": "^19.1.0",
|
||||
"@angular/platform-browser": "^19.1.0",
|
||||
"@angular/platform-browser-dynamic": "^19.1.0",
|
||||
"rxjs": "^7.8.0",
|
||||
"tslib": "^2.6.0",
|
||||
"zone.js": "^0.15.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@angular-devkit/build-angular": "^19.1.0",
|
||||
"@angular/cli": "^19.1.0",
|
||||
"@angular/compiler-cli": "^19.1.0",
|
||||
"typescript": "~5.6.0"
|
||||
}
|
||||
}
|
||||
38
frontend/quick-start.sh
Normal file
38
frontend/quick-start.sh
Normal file
@@ -0,0 +1,38 @@
|
||||
#!/bin/bash
|
||||
|
||||
echo "CF Deployer UI - Quick Start"
|
||||
echo "============================"
|
||||
echo ""
|
||||
|
||||
# Check if Node.js is installed
|
||||
if ! command -v node &> /dev/null; then
|
||||
echo "Error: Node.js is not installed"
|
||||
echo "Please install Node.js 20.x or higher from https://nodejs.org"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Node.js version: $(node --version)"
|
||||
echo "npm version: $(npm --version)"
|
||||
echo ""
|
||||
|
||||
# Check if node_modules exists
|
||||
if [ ! -d "node_modules" ]; then
|
||||
echo "Installing dependencies..."
|
||||
npm install
|
||||
echo ""
|
||||
fi
|
||||
|
||||
# Ask for backend URL
|
||||
read -p "Enter backend URL (default: http://localhost:8080): " BACKEND_URL
|
||||
BACKEND_URL=${BACKEND_URL:-http://localhost:8080}
|
||||
|
||||
echo ""
|
||||
echo "Starting development server..."
|
||||
echo "Backend URL: $BACKEND_URL"
|
||||
echo ""
|
||||
echo "The app will be available at: http://localhost:4200"
|
||||
echo "Press Ctrl+C to stop"
|
||||
echo ""
|
||||
|
||||
# Start dev server
|
||||
npm start
|
||||
442
frontend/src/app/app.component.css
Normal file
442
frontend/src/app/app.component.css
Normal file
@@ -0,0 +1,442 @@
|
||||
/* Dark Theme Variables */
|
||||
:host {
|
||||
--bg-primary: #0d1117;
|
||||
--bg-secondary: #161b22;
|
||||
--bg-tertiary: #21262d;
|
||||
--bg-hover: #30363d;
|
||||
--border-color: #30363d;
|
||||
--text-primary: #c9d1d9;
|
||||
--text-secondary: #8b949e;
|
||||
--accent-primary: #58a6ff;
|
||||
--accent-secondary: #1f6feb;
|
||||
--success: #3fb950;
|
||||
--error: #f85149;
|
||||
--warning: #d29922;
|
||||
}
|
||||
|
||||
.container {
|
||||
max-width: 900px;
|
||||
margin: 0 auto;
|
||||
padding: 20px;
|
||||
}
|
||||
|
||||
header {
|
||||
text-align: center;
|
||||
margin-bottom: 30px;
|
||||
padding: 30px;
|
||||
background: var(--bg-secondary);
|
||||
color: white;
|
||||
border-radius: 12px;
|
||||
box-shadow: 0 8px 24px rgba(0, 0, 0, 0.3);
|
||||
}
|
||||
|
||||
header h1 {
|
||||
margin: 0 0 10px 0;
|
||||
font-size: 2.5em;
|
||||
font-weight: 700;
|
||||
background: linear-gradient(135deg, #ff6b35 0%, #ff8c42 25%, #ffaa00 50%, #ff6b35 75%, #f7931e 100%);
|
||||
-webkit-background-clip: text;
|
||||
-webkit-text-fill-color: transparent;
|
||||
background-clip: text;
|
||||
filter: drop-shadow(0 0 20px rgba(255, 107, 53, 0.3));
|
||||
}
|
||||
|
||||
header p {
|
||||
margin: 0;
|
||||
opacity: 0.9;
|
||||
font-size: 1.1em;
|
||||
}
|
||||
|
||||
.card {
|
||||
background: var(--bg-secondary);
|
||||
border: 1px solid var(--border-color);
|
||||
border-radius: 12px;
|
||||
padding: 30px;
|
||||
margin-bottom: 20px;
|
||||
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.3);
|
||||
}
|
||||
|
||||
.card h2 {
|
||||
margin: 0 0 20px 0;
|
||||
color: var(--text-primary);
|
||||
font-size: 1.5em;
|
||||
}
|
||||
|
||||
.form-section {
|
||||
margin-bottom: 30px;
|
||||
}
|
||||
|
||||
.form-section h3 {
|
||||
margin: 0 0 20px 0;
|
||||
color: var(--text-primary);
|
||||
font-size: 1.2em;
|
||||
font-weight: 600;
|
||||
padding-bottom: 10px;
|
||||
border-bottom: 2px solid var(--border-color);
|
||||
}
|
||||
|
||||
.form-group {
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
|
||||
.form-group label {
|
||||
display: block;
|
||||
margin-bottom: 8px;
|
||||
font-weight: 600;
|
||||
color: var(--text-primary);
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
.form-group input[type="text"],
|
||||
.form-group input[type="password"] {
|
||||
width: 100%;
|
||||
padding: 12px 16px;
|
||||
border: 1px solid var(--border-color);
|
||||
border-radius: 8px;
|
||||
font-size: 14px;
|
||||
box-sizing: border-box;
|
||||
background: var(--bg-tertiary);
|
||||
color: var(--text-primary);
|
||||
transition: all 0.2s ease;
|
||||
}
|
||||
|
||||
.form-group input[type="text"]:focus,
|
||||
.form-group input[type="password"]:focus {
|
||||
outline: none;
|
||||
border-color: var(--accent-primary);
|
||||
box-shadow: 0 0 0 3px rgba(88, 166, 255, 0.1);
|
||||
}
|
||||
|
||||
.form-group input:disabled {
|
||||
background-color: var(--bg-primary);
|
||||
cursor: not-allowed;
|
||||
opacity: 0.6;
|
||||
}
|
||||
|
||||
/* Modern File Upload */
|
||||
.file-upload-wrapper {
|
||||
position: relative;
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
|
||||
.file-input {
|
||||
position: absolute;
|
||||
width: 0.1px;
|
||||
height: 0.1px;
|
||||
opacity: 0;
|
||||
overflow: hidden;
|
||||
z-index: -1;
|
||||
}
|
||||
|
||||
.file-upload-btn {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 24px;
|
||||
padding: 12px 24px;
|
||||
background: var(--bg-tertiary);
|
||||
border: 2px dashed var(--border-color);
|
||||
border-radius: 8px;
|
||||
color: var(--text-primary);
|
||||
font-weight: 600;
|
||||
cursor: pointer;
|
||||
transition: all 0.3s ease;
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
.file-upload-btn:hover {
|
||||
background: var(--bg-hover);
|
||||
border-color: var(--accent-primary);
|
||||
color: var(--accent-primary);
|
||||
}
|
||||
|
||||
.file-upload-btn.disabled {
|
||||
opacity: 0.5;
|
||||
cursor: not-allowed;
|
||||
pointer-events: none;
|
||||
}
|
||||
|
||||
.file-upload-btn svg {
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.file-info {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
padding: 10px 16px;
|
||||
background: var(--bg-tertiary);
|
||||
border: 1px solid var(--border-color);
|
||||
border-radius: 8px;
|
||||
color: var(--text-primary);
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
.file-info svg {
|
||||
flex-shrink: 0;
|
||||
color: var(--accent-primary);
|
||||
}
|
||||
|
||||
.file-size {
|
||||
color: var(--text-secondary);
|
||||
margin-left: auto;
|
||||
}
|
||||
|
||||
.form-group.checkbox {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.form-group.checkbox label {
|
||||
margin: 0;
|
||||
font-weight: normal;
|
||||
cursor: pointer;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
color: var(--text-primary);
|
||||
}
|
||||
|
||||
.form-group.checkbox input {
|
||||
width: auto;
|
||||
margin-right: 10px;
|
||||
cursor: pointer;
|
||||
width: 18px;
|
||||
height: 18px;
|
||||
}
|
||||
|
||||
.form-row {
|
||||
display: grid;
|
||||
grid-template-columns: 1fr 1fr;
|
||||
gap: 20px;
|
||||
}
|
||||
|
||||
.form-actions {
|
||||
display: flex;
|
||||
gap: 12px;
|
||||
margin-top: 30px;
|
||||
padding-top: 20px;
|
||||
border-top: 1px solid var(--border-color);
|
||||
justify-content: center;
|
||||
}
|
||||
|
||||
.btn {
|
||||
padding: 14px 28px;
|
||||
border: none;
|
||||
border-radius: 8px;
|
||||
font-size: 16px;
|
||||
font-weight: 600;
|
||||
cursor: pointer;
|
||||
transition: all 0.3s ease;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
}
|
||||
|
||||
.btn:disabled {
|
||||
opacity: 0.5;
|
||||
cursor: not-allowed;
|
||||
}
|
||||
|
||||
.btn-primary {
|
||||
background: linear-gradient(135deg, #ff6b35 0%, #f7931e 100%);
|
||||
color: white;
|
||||
box-shadow: 0 4px 12px rgba(255, 107, 53, 0.3);
|
||||
}
|
||||
|
||||
.btn-primary:hover:not(:disabled) {
|
||||
transform: translateY(-2px);
|
||||
box-shadow: 0 6px 16px rgba(255, 107, 53, 0.4);
|
||||
}
|
||||
|
||||
.btn-secondary {
|
||||
background: var(--bg-tertiary);
|
||||
color: var(--text-primary);
|
||||
border: 1px solid var(--border-color);
|
||||
}
|
||||
|
||||
.btn-secondary:hover:not(:disabled) {
|
||||
background: var(--bg-hover);
|
||||
border-color: var(--accent-primary);
|
||||
}
|
||||
|
||||
.progress-section {
|
||||
margin-top: 10px;
|
||||
}
|
||||
|
||||
.progress-label {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
margin-bottom: 10px;
|
||||
font-weight: 600;
|
||||
color: var(--text-primary);
|
||||
font-size: 15px;
|
||||
}
|
||||
|
||||
.progress-percent {
|
||||
color: var(--accent-primary);
|
||||
font-variant-numeric: tabular-nums;
|
||||
}
|
||||
|
||||
.progress-bar {
|
||||
height: 32px;
|
||||
background: var(--bg-tertiary);
|
||||
border: 1px solid var(--border-color);
|
||||
border-radius: 8px;
|
||||
overflow: hidden;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.progress-fill {
|
||||
height: 100%;
|
||||
background: linear-gradient(90deg, #1f6feb 0%, #8957e5 100%);
|
||||
transition: width 0.3s ease;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: flex-end;
|
||||
padding-right: 12px;
|
||||
color: white;
|
||||
font-weight: 600;
|
||||
box-shadow: inset 0 1px 2px rgba(255, 255, 255, 0.1);
|
||||
}
|
||||
|
||||
.status-badge {
|
||||
margin-top: 20px;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.badge {
|
||||
display: inline-block;
|
||||
padding: 10px 20px;
|
||||
border-radius: 8px;
|
||||
font-weight: 600;
|
||||
font-size: 14px;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.5px;
|
||||
}
|
||||
|
||||
.badge-success {
|
||||
background: rgba(63, 185, 80, 0.15);
|
||||
color: var(--success);
|
||||
border: 1px solid var(--success);
|
||||
}
|
||||
|
||||
.badge-error {
|
||||
background: rgba(248, 81, 73, 0.15);
|
||||
color: var(--error);
|
||||
border: 1px solid var(--error);
|
||||
}
|
||||
|
||||
.badge-info {
|
||||
background: rgba(88, 166, 255, 0.15);
|
||||
color: var(--accent-primary);
|
||||
border: 1px solid var(--accent-primary);
|
||||
}
|
||||
|
||||
.logs-card {
|
||||
background: var(--bg-primary);
|
||||
border: 1px solid var(--border-color);
|
||||
}
|
||||
|
||||
.logs-header {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
cursor: pointer;
|
||||
user-select: none;
|
||||
padding: 5px;
|
||||
border-radius: 6px;
|
||||
transition: background 0.2s ease;
|
||||
}
|
||||
|
||||
.logs-header:hover {
|
||||
background: var(--bg-hover);
|
||||
}
|
||||
|
||||
.logs-header h2 {
|
||||
color: var(--text-primary);
|
||||
margin: 0;
|
||||
font-size: 1.3em;
|
||||
}
|
||||
|
||||
.toggle-icon {
|
||||
font-size: 1.2em;
|
||||
color: var(--text-secondary);
|
||||
transition: transform 0.2s ease;
|
||||
}
|
||||
|
||||
.logs-output {
|
||||
background: var(--bg-tertiary);
|
||||
border: 1px solid var(--border-color);
|
||||
border-radius: 8px;
|
||||
padding: 16px;
|
||||
margin-top: 16px;
|
||||
max-height: 500px;
|
||||
overflow-y: auto;
|
||||
font-family: 'Courier New', 'Consolas', monospace;
|
||||
font-size: 13px;
|
||||
line-height: 1.6;
|
||||
}
|
||||
|
||||
.log-line {
|
||||
padding: 4px 0;
|
||||
color: var(--text-secondary);
|
||||
border-bottom: 1px solid rgba(48, 54, 61, 0.3);
|
||||
}
|
||||
|
||||
.log-line:last-child {
|
||||
border-bottom: none;
|
||||
}
|
||||
|
||||
/* Scrollbar styling */
|
||||
.logs-output::-webkit-scrollbar {
|
||||
width: 10px;
|
||||
}
|
||||
|
||||
.logs-output::-webkit-scrollbar-track {
|
||||
background: var(--bg-primary);
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
.logs-output::-webkit-scrollbar-thumb {
|
||||
background: var(--bg-hover);
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
.logs-output::-webkit-scrollbar-thumb:hover {
|
||||
background: #484f58;
|
||||
}
|
||||
|
||||
/* Responsive Design */
|
||||
@media (max-width: 768px) {
|
||||
.container {
|
||||
padding: 10px;
|
||||
}
|
||||
|
||||
header {
|
||||
padding: 20px;
|
||||
}
|
||||
|
||||
header h1 {
|
||||
font-size: 1.8em;
|
||||
}
|
||||
|
||||
.form-row {
|
||||
grid-template-columns: 1fr;
|
||||
}
|
||||
|
||||
.form-actions {
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.btn {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.card {
|
||||
padding: 20px;
|
||||
}
|
||||
|
||||
.logs-output {
|
||||
max-height: 300px;
|
||||
font-size: 12px;
|
||||
}
|
||||
}
|
||||
217
frontend/src/app/app.component.html
Normal file
217
frontend/src/app/app.component.html
Normal file
@@ -0,0 +1,217 @@
|
||||
<div class="container">
|
||||
<header>
|
||||
<h1>Tanzu Deployer</h1>
|
||||
<p>Deploy your applications to Tanzu Platform</p>
|
||||
</header>
|
||||
|
||||
<div class="card">
|
||||
<form (ngSubmit)="deploy()" #deployForm="ngForm">
|
||||
<!-- Cloud Foundry Configuration -->
|
||||
<div class="form-section">
|
||||
<h3>Tanzu Configuration</h3>
|
||||
|
||||
<div class="form-group">
|
||||
<label for="apiEndpoint">API Endpoint *</label>
|
||||
<input
|
||||
type="text"
|
||||
id="apiEndpoint"
|
||||
name="apiEndpoint"
|
||||
[(ngModel)]="apiEndpoint"
|
||||
placeholder="https://api.cf.example.com"
|
||||
required
|
||||
[disabled]="uploading">
|
||||
</div>
|
||||
|
||||
<div class="form-row">
|
||||
<div class="form-group">
|
||||
<label for="username">Username *</label>
|
||||
<input
|
||||
type="text"
|
||||
id="username"
|
||||
name="username"
|
||||
[(ngModel)]="username"
|
||||
placeholder="your-username"
|
||||
required
|
||||
[disabled]="uploading">
|
||||
</div>
|
||||
|
||||
<div class="form-group">
|
||||
<label for="password">Password *</label>
|
||||
<input
|
||||
type="password"
|
||||
id="password"
|
||||
name="password"
|
||||
[(ngModel)]="password"
|
||||
placeholder="••••••••"
|
||||
required
|
||||
[disabled]="uploading">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-row">
|
||||
<div class="form-group">
|
||||
<label for="organization">Organization *</label>
|
||||
<input
|
||||
type="text"
|
||||
id="organization"
|
||||
name="organization"
|
||||
[(ngModel)]="organization"
|
||||
placeholder="your-org"
|
||||
required
|
||||
[disabled]="uploading">
|
||||
</div>
|
||||
|
||||
<div class="form-group">
|
||||
<label for="space">Space *</label>
|
||||
<input
|
||||
type="text"
|
||||
id="space"
|
||||
name="space"
|
||||
[(ngModel)]="space"
|
||||
placeholder="dev"
|
||||
required
|
||||
[disabled]="uploading">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-group">
|
||||
<label for="appName">Application Name *</label>
|
||||
<input
|
||||
type="text"
|
||||
id="appName"
|
||||
name="appName"
|
||||
[(ngModel)]="appName"
|
||||
placeholder="my-app"
|
||||
required
|
||||
[disabled]="uploading">
|
||||
</div>
|
||||
|
||||
<div class="form-group checkbox" *ngIf="env.enableSslValidation">
|
||||
<label>
|
||||
<input
|
||||
type="checkbox"
|
||||
name="skipSsl"
|
||||
[(ngModel)]="skipSsl"
|
||||
[disabled]="uploading">
|
||||
Skip SSL Validation
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Files Upload -->
|
||||
<div class="form-section">
|
||||
<h3>Application Files</h3>
|
||||
|
||||
<div class="form-group">
|
||||
<label for="jarFile">JAR File *</label>
|
||||
<div class="file-upload-wrapper">
|
||||
<input
|
||||
type="file"
|
||||
id="jarFile"
|
||||
name="jarFile"
|
||||
class="file-input"
|
||||
(change)="onJarFileChange($event)"
|
||||
accept=".jar"
|
||||
required
|
||||
[disabled]="uploading">
|
||||
<label for="jarFile" class="file-upload-btn" [class.disabled]="uploading">
|
||||
<svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
|
||||
<path d="M21 15v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4"></path>
|
||||
<polyline points="17 8 12 3 7 8"></polyline>
|
||||
<line x1="12" y1="3" x2="12" y2="15"></line>
|
||||
</svg>
|
||||
<span *ngIf="!jarFile">Choose JAR File</span>
|
||||
<span *ngIf="jarFile">Change File</span>
|
||||
</label>
|
||||
</div>
|
||||
<div class="file-info" *ngIf="jarFile">
|
||||
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
|
||||
<path d="M13 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V9z"></path>
|
||||
<polyline points="13 2 13 9 20 9"></polyline>
|
||||
</svg>
|
||||
<span>{{ jarFile.name }}</span>
|
||||
<span class="file-size">({{ (jarFile.size / 1024 / 1024).toFixed(2) }}MB)</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-group">
|
||||
<label for="manifestFile">Manifest File *</label>
|
||||
<div class="file-upload-wrapper">
|
||||
<input
|
||||
type="file"
|
||||
id="manifestFile"
|
||||
name="manifestFile"
|
||||
class="file-input"
|
||||
(change)="onManifestFileChange($event)"
|
||||
accept=".yml,.yaml"
|
||||
required
|
||||
[disabled]="uploading">
|
||||
<label for="manifestFile" class="file-upload-btn" [class.disabled]="uploading">
|
||||
<svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
|
||||
<path d="M21 15v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4"></path>
|
||||
<polyline points="17 8 12 3 7 8"></polyline>
|
||||
<line x1="12" y1="3" x2="12" y2="15"></line>
|
||||
</svg>
|
||||
<span *ngIf="!manifestFile">Choose Manifest File</span>
|
||||
<span *ngIf="manifestFile">Change File</span>
|
||||
</label>
|
||||
</div>
|
||||
<div class="file-info" *ngIf="manifestFile">
|
||||
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
|
||||
<path d="M13 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V9z"></path>
|
||||
<polyline points="13 2 13 9 20 9"></polyline>
|
||||
</svg>
|
||||
<span>{{ manifestFile.name }}</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Action Buttons -->
|
||||
<div class="form-actions">
|
||||
<button type="submit" class="btn btn-primary" [disabled]="!deployForm.form.valid || uploading">
|
||||
<span *ngIf="!uploading">Deploy to Tanzu</span>
|
||||
<span *ngIf="uploading">Deploying...</span>
|
||||
</button>
|
||||
<button type="button" class="btn btn-secondary" (click)="clearForm()" [disabled]="uploading">
|
||||
Clear Form
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
|
||||
<!-- Progress Section -->
|
||||
<div class="card" *ngIf="uploading || deploymentStatus">
|
||||
<h2>Deployment Progress</h2>
|
||||
|
||||
<div class="progress-section">
|
||||
<div class="progress-label">
|
||||
<span>{{ currentStep }}</span>
|
||||
<span class="progress-percent">{{ uploadProgress }}%</span>
|
||||
</div>
|
||||
|
||||
<div class="progress-bar">
|
||||
<div class="progress-fill" [style.width.%]="uploadProgress"></div>
|
||||
</div>
|
||||
|
||||
<div class="status-badge" *ngIf="deploymentStatus">
|
||||
<span class="badge" [class.badge-success]="deploymentStatus === 'COMPLETED'"
|
||||
[class.badge-error]="deploymentStatus === 'FAILED'"
|
||||
[class.badge-info]="deploymentStatus === 'IN_PROGRESS'">
|
||||
{{ deploymentStatus }}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Logs Section -->
|
||||
<div class="card logs-card" *ngIf="logs.length > 0">
|
||||
<div class="logs-header" (click)="toggleLogs()">
|
||||
<h2>Deployment Logs</h2>
|
||||
<span class="toggle-icon">{{ logsExpanded ? '▼' : '▶' }}</span>
|
||||
</div>
|
||||
|
||||
<div class="logs-output" *ngIf="logsExpanded">
|
||||
<div class="log-line" *ngFor="let log of logs">{{ log }}</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
233
frontend/src/app/app.component.ts
Normal file
233
frontend/src/app/app.component.ts
Normal file
@@ -0,0 +1,233 @@
|
||||
import { Component, OnInit } from '@angular/core';
|
||||
import { CommonModule } from '@angular/common';
|
||||
import { FormsModule } from '@angular/forms';
|
||||
import { DeployService } from './deploy.service';
|
||||
import { environment } from '../environments/environment';
|
||||
|
||||
@Component({
|
||||
selector: 'app-root',
|
||||
standalone: true,
|
||||
imports: [CommonModule, FormsModule],
|
||||
templateUrl: './app.component.html',
|
||||
styleUrls: ['./app.component.css']
|
||||
})
|
||||
export class AppComponent implements OnInit {
|
||||
// Environment config
|
||||
env = environment;
|
||||
|
||||
// Form fields
|
||||
apiEndpoint = '';
|
||||
username = '';
|
||||
password = '';
|
||||
organization = '';
|
||||
space = '';
|
||||
appName = '';
|
||||
skipSsl = false;
|
||||
jarFile: File | null = null;
|
||||
manifestFile: File | null = null;
|
||||
|
||||
// Upload state
|
||||
uploading = false;
|
||||
uploadProgress = 0;
|
||||
currentStep = '';
|
||||
logs: string[] = [];
|
||||
logsExpanded = environment.defaultLogsExpanded;
|
||||
deploymentStatus = '';
|
||||
sessionId = '';
|
||||
|
||||
constructor(private deployService: DeployService) {}
|
||||
|
||||
ngOnInit(): void {
|
||||
// Auto-populate form fields from environment.local.ts (development only)
|
||||
const formDefaults = environment.formDefaults as any;
|
||||
if (formDefaults && formDefaults.enabled) {
|
||||
this.apiEndpoint = formDefaults.apiEndpoint || '';
|
||||
this.username = formDefaults.username || '';
|
||||
this.password = formDefaults.password || '';
|
||||
this.organization = formDefaults.organization || '';
|
||||
this.space = formDefaults.space || '';
|
||||
this.appName = formDefaults.appName || '';
|
||||
this.skipSsl = formDefaults.skipSslValidation || false;
|
||||
|
||||
console.log('Form auto-populated from environment.local.ts');
|
||||
}
|
||||
}
|
||||
|
||||
onJarFileChange(event: any) {
|
||||
this.jarFile = event.target.files[0];
|
||||
}
|
||||
|
||||
onManifestFileChange(event: any) {
|
||||
this.manifestFile = event.target.files[0];
|
||||
}
|
||||
|
||||
addLog(message: string) {
|
||||
const timestamp = new Date().toLocaleTimeString();
|
||||
this.logs.push(`[${timestamp}] ${message}`);
|
||||
}
|
||||
|
||||
async deploy() {
|
||||
if (!this.jarFile || !this.manifestFile) {
|
||||
alert('Please select both JAR and Manifest files');
|
||||
return;
|
||||
}
|
||||
|
||||
this.uploading = true;
|
||||
this.uploadProgress = 0;
|
||||
this.logs = [];
|
||||
this.deploymentStatus = '';
|
||||
|
||||
try {
|
||||
// Step 1: Initialize upload session
|
||||
this.currentStep = 'Initializing upload session...';
|
||||
this.addLog('Step 1/5: Initializing upload session');
|
||||
|
||||
const config = {
|
||||
apiEndpoint: this.apiEndpoint,
|
||||
username: this.username,
|
||||
password: this.password,
|
||||
organization: this.organization,
|
||||
space: this.space,
|
||||
appName: this.appName,
|
||||
skipSslValidation: this.skipSsl
|
||||
};
|
||||
|
||||
const initResponse = await this.deployService.initUpload(config);
|
||||
this.sessionId = initResponse.uploadSessionId;
|
||||
this.addLog(`✓ Session created: ${this.sessionId}`);
|
||||
this.uploadProgress = 10;
|
||||
|
||||
// Step 2: Upload JAR file
|
||||
this.currentStep = 'Uploading JAR file...';
|
||||
this.addLog(`Step 2/5: Uploading JAR file (${(this.jarFile.size / 1024 / 1024).toFixed(2)}MB)`);
|
||||
|
||||
await this.uploadFileInChunks(this.jarFile, 'jarFile', 10, 50);
|
||||
this.addLog('✓ JAR file upload completed');
|
||||
|
||||
// Step 3: Upload manifest file
|
||||
this.currentStep = 'Uploading manifest file...';
|
||||
this.addLog('Step 3/5: Uploading manifest file');
|
||||
|
||||
await this.uploadFileInChunks(this.manifestFile, 'manifest', 50, 60);
|
||||
this.addLog('✓ Manifest file upload completed');
|
||||
|
||||
// Step 4: Finalize and start deployment
|
||||
this.currentStep = 'Starting deployment...';
|
||||
this.addLog('Step 4/5: Starting async deployment');
|
||||
|
||||
const finalizeResponse = await this.deployService.finalizeUpload(this.sessionId);
|
||||
this.addLog(`✓ Deployment started (Status: ${finalizeResponse.status})`);
|
||||
this.uploadProgress = 70;
|
||||
|
||||
// Step 5: Poll deployment status
|
||||
this.currentStep = 'Deploying application...';
|
||||
this.addLog('Step 5/5: Polling deployment status');
|
||||
|
||||
await this.pollDeploymentStatus();
|
||||
|
||||
} catch (error: any) {
|
||||
this.addLog(`✗ ERROR: ${error.message || error}`);
|
||||
this.currentStep = 'Deployment failed';
|
||||
this.deploymentStatus = 'FAILED';
|
||||
this.uploading = false;
|
||||
}
|
||||
}
|
||||
|
||||
async uploadFileInChunks(file: File, fileType: string, startProgress: number, endProgress: number) {
|
||||
const CHUNK_SIZE = environment.chunkSize;
|
||||
const totalChunks = Math.ceil(file.size / CHUNK_SIZE);
|
||||
|
||||
for (let chunkIndex = 0; chunkIndex < totalChunks; chunkIndex++) {
|
||||
const start = chunkIndex * CHUNK_SIZE;
|
||||
const end = Math.min(start + CHUNK_SIZE, file.size);
|
||||
const chunk = file.slice(start, end);
|
||||
|
||||
// Convert chunk to base64
|
||||
const base64Chunk = await this.fileToBase64(chunk);
|
||||
|
||||
await this.deployService.uploadChunk(
|
||||
this.sessionId,
|
||||
fileType,
|
||||
file.name,
|
||||
chunkIndex,
|
||||
totalChunks,
|
||||
base64Chunk
|
||||
);
|
||||
|
||||
const progress = startProgress + ((chunkIndex + 1) / totalChunks) * (endProgress - startProgress);
|
||||
this.uploadProgress = Math.round(progress);
|
||||
this.addLog(` Chunk ${chunkIndex + 1}/${totalChunks} uploaded`);
|
||||
}
|
||||
}
|
||||
|
||||
async fileToBase64(blob: Blob): Promise<string> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const reader = new FileReader();
|
||||
reader.onload = () => {
|
||||
const base64 = (reader.result as string).split(',')[1];
|
||||
resolve(base64);
|
||||
};
|
||||
reader.onerror = reject;
|
||||
reader.readAsDataURL(blob);
|
||||
});
|
||||
}
|
||||
|
||||
async pollDeploymentStatus() {
|
||||
const maxAttempts = environment.maxPollAttempts;
|
||||
let attempts = 0;
|
||||
|
||||
while (attempts < maxAttempts) {
|
||||
await new Promise(resolve => setTimeout(resolve, environment.pollInterval));
|
||||
|
||||
const status = await this.deployService.getDeploymentStatus(this.sessionId);
|
||||
this.deploymentStatus = status.status;
|
||||
|
||||
const progressPercent = 70 + (status.progress || 0) * 0.3;
|
||||
this.uploadProgress = Math.round(progressPercent);
|
||||
|
||||
this.addLog(` Status: ${status.status} - ${status.message || ''}`);
|
||||
|
||||
if (status.status === 'COMPLETED') {
|
||||
this.currentStep = 'Deployment completed successfully!';
|
||||
this.uploadProgress = 100;
|
||||
this.uploading = false;
|
||||
this.addLog('✓ Deployment completed successfully!');
|
||||
return;
|
||||
}
|
||||
|
||||
if (status.status === 'FAILED') {
|
||||
this.currentStep = 'Deployment failed';
|
||||
this.uploading = false;
|
||||
this.addLog(`✗ Deployment failed: ${status.message}`);
|
||||
if (status.error) {
|
||||
this.addLog(` Error: ${status.error}`);
|
||||
}
|
||||
throw new Error('Deployment failed');
|
||||
}
|
||||
|
||||
attempts++;
|
||||
}
|
||||
|
||||
throw new Error('Deployment timeout after 10 minutes');
|
||||
}
|
||||
|
||||
toggleLogs() {
|
||||
this.logsExpanded = !this.logsExpanded;
|
||||
}
|
||||
|
||||
clearForm() {
|
||||
this.apiEndpoint = '';
|
||||
this.username = '';
|
||||
this.password = '';
|
||||
this.organization = '';
|
||||
this.space = '';
|
||||
this.appName = '';
|
||||
this.skipSsl = false;
|
||||
this.jarFile = null;
|
||||
this.manifestFile = null;
|
||||
this.logs = [];
|
||||
this.uploadProgress = 0;
|
||||
this.currentStep = '';
|
||||
this.deploymentStatus = '';
|
||||
}
|
||||
}
|
||||
97
frontend/src/app/deploy.service.ts
Normal file
97
frontend/src/app/deploy.service.ts
Normal file
@@ -0,0 +1,97 @@
|
||||
import { Injectable } from '@angular/core';
|
||||
import { HttpClient, HttpHeaders } from '@angular/common/http';
|
||||
import { firstValueFrom } from 'rxjs';
|
||||
import { environment } from '../environments/environment';
|
||||
|
||||
export interface CfConfig {
|
||||
apiEndpoint: string;
|
||||
username: string;
|
||||
password: string;
|
||||
organization: string;
|
||||
space: string;
|
||||
appName: string;
|
||||
skipSslValidation: boolean;
|
||||
}
|
||||
|
||||
export interface InitUploadResponse {
|
||||
success: boolean;
|
||||
uploadSessionId: string;
|
||||
message: string;
|
||||
}
|
||||
|
||||
export interface ChunkUploadResponse {
|
||||
success: boolean;
|
||||
uploadSessionId: string;
|
||||
message: string;
|
||||
}
|
||||
|
||||
export interface FinalizeResponse {
|
||||
uploadSessionId: string;
|
||||
status: string;
|
||||
message: string;
|
||||
progress: number;
|
||||
}
|
||||
|
||||
export interface DeploymentStatus {
|
||||
uploadSessionId: string;
|
||||
status: string;
|
||||
message: string;
|
||||
progress: number;
|
||||
output?: string;
|
||||
error?: string;
|
||||
}
|
||||
|
||||
@Injectable({
|
||||
providedIn: 'root'
|
||||
})
|
||||
export class DeployService {
|
||||
private apiBase = environment.apiBase;
|
||||
|
||||
constructor(private http: HttpClient) {}
|
||||
|
||||
async initUpload(config: CfConfig): Promise<InitUploadResponse> {
|
||||
const url = `${this.apiBase}/upload/init`;
|
||||
return firstValueFrom(
|
||||
this.http.post<InitUploadResponse>(url, config)
|
||||
);
|
||||
}
|
||||
|
||||
async uploadChunk(
|
||||
sessionId: string,
|
||||
fileType: string,
|
||||
fileName: string,
|
||||
chunkIndex: number,
|
||||
totalChunks: number,
|
||||
base64Data: string
|
||||
): Promise<ChunkUploadResponse> {
|
||||
const url = `${this.apiBase}/upload/chunk?uploadSessionId=${sessionId}&fileType=${fileType}&chunkIndex=${chunkIndex}&totalChunks=${totalChunks}&fileName=${encodeURIComponent(fileName)}`;
|
||||
|
||||
const headers = new HttpHeaders({
|
||||
'Content-Type': 'text/plain',
|
||||
'X-Chunk-Encoding': 'base64'
|
||||
});
|
||||
|
||||
return firstValueFrom(
|
||||
this.http.post<ChunkUploadResponse>(url, base64Data, { headers })
|
||||
);
|
||||
}
|
||||
|
||||
async finalizeUpload(sessionId: string): Promise<FinalizeResponse> {
|
||||
const url = `${this.apiBase}/upload/finalize?uploadSessionId=${sessionId}&async=true`;
|
||||
|
||||
const headers = new HttpHeaders({
|
||||
'Content-Length': '0'
|
||||
});
|
||||
|
||||
return firstValueFrom(
|
||||
this.http.post<FinalizeResponse>(url, null, { headers })
|
||||
);
|
||||
}
|
||||
|
||||
async getDeploymentStatus(sessionId: string): Promise<DeploymentStatus> {
|
||||
const url = `${this.apiBase}/deployment/status/${sessionId}`;
|
||||
return firstValueFrom(
|
||||
this.http.get<DeploymentStatus>(url)
|
||||
);
|
||||
}
|
||||
}
|
||||
24
frontend/src/environments/environment.local.template.ts
Normal file
24
frontend/src/environments/environment.local.template.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
/**
|
||||
* Local Development Form Defaults Template
|
||||
*
|
||||
* This file is a template for environment.local.ts which is gitignored.
|
||||
* To use this feature:
|
||||
* 1. Copy this file to environment.local.ts
|
||||
* 2. Fill in your development credentials
|
||||
* 3. The form will auto-populate in development mode
|
||||
*
|
||||
* NOTE: This only works in development. Production builds ignore these defaults.
|
||||
*/
|
||||
export const localFormDefaults = {
|
||||
// Set to true to enable auto-population of form fields
|
||||
enabled: false,
|
||||
|
||||
// Cloud Foundry / Tanzu Configuration
|
||||
apiEndpoint: '', // e.g., 'https://api.cf.example.com'
|
||||
username: '', // Your CF username
|
||||
password: '', // Your CF password
|
||||
organization: '', // Your CF organization
|
||||
space: '', // Your CF space (e.g., 'dev', 'staging')
|
||||
appName: '', // Default application name
|
||||
skipSslValidation: false // Skip SSL validation (for development environments)
|
||||
};
|
||||
23
frontend/src/environments/environment.prod.ts
Normal file
23
frontend/src/environments/environment.prod.ts
Normal file
@@ -0,0 +1,23 @@
|
||||
export const environment = {
|
||||
production: true,
|
||||
|
||||
// API Configuration
|
||||
apiBase: '/api/cf',
|
||||
|
||||
// Upload Configuration
|
||||
chunkSize: 1048576, // 1MB in bytes
|
||||
|
||||
// Feature Flags
|
||||
enableSslValidation: false, // Show/hide SSL validation checkbox
|
||||
|
||||
// Polling Configuration
|
||||
pollInterval: 5000, // 5 seconds in milliseconds
|
||||
maxPollAttempts: 120, // 10 minutes total (120 * 5 seconds)
|
||||
|
||||
// UI Configuration
|
||||
defaultLogsExpanded: true,
|
||||
showDebugInfo: false,
|
||||
|
||||
// Form Defaults (ALWAYS disabled in production)
|
||||
formDefaults: { enabled: false }
|
||||
};
|
||||
32
frontend/src/environments/environment.ts
Normal file
32
frontend/src/environments/environment.ts
Normal file
@@ -0,0 +1,32 @@
|
||||
// Try to import local form defaults (gitignored file with dev credentials)
|
||||
let localDefaults: any = { enabled: false };
|
||||
try {
|
||||
const imported = require('./environment.local');
|
||||
localDefaults = imported.localFormDefaults || { enabled: false };
|
||||
} catch (e) {
|
||||
// environment.local.ts doesn't exist, which is fine
|
||||
}
|
||||
|
||||
export const environment = {
|
||||
production: false,
|
||||
|
||||
// API Configuration
|
||||
apiBase: '/api/cf',
|
||||
|
||||
// Upload Configuration
|
||||
chunkSize: 1048576, // 1MB in bytes
|
||||
|
||||
// Feature Flags
|
||||
enableSslValidation: false, // Show/hide SSL validation checkbox
|
||||
|
||||
// Polling Configuration
|
||||
pollInterval: 5000, // 5 seconds in milliseconds
|
||||
maxPollAttempts: 120, // 10 minutes total (120 * 5 seconds)
|
||||
|
||||
// UI Configuration
|
||||
defaultLogsExpanded: true,
|
||||
showDebugInfo: false,
|
||||
|
||||
// Form Defaults (auto-populated from environment.local.ts in development only)
|
||||
formDefaults: localDefaults
|
||||
};
|
||||
12
frontend/src/index.html
Normal file
12
frontend/src/index.html
Normal file
@@ -0,0 +1,12 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<title>CF Deployer</title>
|
||||
<base href="/">
|
||||
</head>
|
||||
<body>
|
||||
<app-root></app-root>
|
||||
</body>
|
||||
</html>
|
||||
9
frontend/src/main.ts
Normal file
9
frontend/src/main.ts
Normal file
@@ -0,0 +1,9 @@
|
||||
import { bootstrapApplication } from '@angular/platform-browser';
|
||||
import { provideHttpClient } from '@angular/common/http';
|
||||
import { AppComponent } from './app/app.component';
|
||||
|
||||
bootstrapApplication(AppComponent, {
|
||||
providers: [
|
||||
provideHttpClient()
|
||||
]
|
||||
}).catch(err => console.error(err));
|
||||
17
frontend/src/styles.css
Normal file
17
frontend/src/styles.css
Normal file
@@ -0,0 +1,17 @@
|
||||
* {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
|
||||
background: #010409;
|
||||
min-height: 100vh;
|
||||
padding: 20px 0;
|
||||
color: #c9d1d9;
|
||||
}
|
||||
|
||||
* {
|
||||
box-sizing: border-box;
|
||||
}
|
||||
30
frontend/tsconfig.json
Normal file
30
frontend/tsconfig.json
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"compileOnSave": false,
|
||||
"compilerOptions": {
|
||||
"outDir": "./dist/out-tsc",
|
||||
"strict": true,
|
||||
"noImplicitOverride": true,
|
||||
"noPropertyAccessFromIndexSignature": true,
|
||||
"noImplicitReturns": true,
|
||||
"noFallthroughCasesInSwitch": true,
|
||||
"skipLibCheck": true,
|
||||
"esModuleInterop": true,
|
||||
"sourceMap": true,
|
||||
"declaration": false,
|
||||
"experimentalDecorators": true,
|
||||
"moduleResolution": "bundler",
|
||||
"importHelpers": true,
|
||||
"target": "ES2022",
|
||||
"module": "ES2022",
|
||||
"lib": [
|
||||
"ES2022",
|
||||
"dom"
|
||||
]
|
||||
},
|
||||
"angularCompilerOptions": {
|
||||
"enableI18nLegacyMessageIdFormat": false,
|
||||
"strictInjectionParameters": true,
|
||||
"strictInputAccessModifiers": true,
|
||||
"strictTemplates": true
|
||||
}
|
||||
}
|
||||
@@ -2,8 +2,12 @@ package com.cfdeployer;
|
||||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||
import org.springframework.scheduling.annotation.EnableAsync;
|
||||
import org.springframework.scheduling.annotation.EnableScheduling;
|
||||
|
||||
@SpringBootApplication
|
||||
@EnableScheduling
|
||||
@EnableAsync
|
||||
public class CfDeployerApplication {
|
||||
|
||||
public static void main(String[] args) {
|
||||
|
||||
26
src/main/java/com/cfdeployer/config/MultipartConfig.java
Normal file
26
src/main/java/com/cfdeployer/config/MultipartConfig.java
Normal file
@@ -0,0 +1,26 @@
|
||||
package com.cfdeployer.config;
|
||||
|
||||
import jakarta.servlet.MultipartConfigElement;
|
||||
import org.springframework.boot.web.servlet.MultipartConfigFactory;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.util.unit.DataSize;
|
||||
|
||||
@Configuration
|
||||
public class MultipartConfig {
|
||||
|
||||
@Bean
|
||||
public MultipartConfigElement multipartConfigElement() {
|
||||
MultipartConfigFactory factory = new MultipartConfigFactory();
|
||||
|
||||
// Set max file size for chunks (10MB per request is safe)
|
||||
factory.setMaxFileSize(DataSize.ofMegabytes(10));
|
||||
factory.setMaxRequestSize(DataSize.ofMegabytes(10));
|
||||
|
||||
// Important: Set file size threshold to write to disk immediately
|
||||
// Setting to 0 means all uploads go directly to disk, not memory
|
||||
factory.setFileSizeThreshold(DataSize.ofBytes(0));
|
||||
|
||||
return factory.createMultipartConfig();
|
||||
}
|
||||
}
|
||||
@@ -2,7 +2,12 @@ package com.cfdeployer.controller;
|
||||
|
||||
import com.cfdeployer.model.CfDeployRequest;
|
||||
import com.cfdeployer.model.CfDeployResponse;
|
||||
import com.cfdeployer.model.ChunkUploadRequest;
|
||||
import com.cfdeployer.model.ChunkUploadResponse;
|
||||
import com.cfdeployer.model.DeploymentStatus;
|
||||
import com.cfdeployer.service.AsyncDeploymentService;
|
||||
import com.cfdeployer.service.CfCliService;
|
||||
import com.cfdeployer.service.ChunkedUploadService;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import jakarta.validation.Valid;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
@@ -22,6 +27,8 @@ public class CfDeployController {
|
||||
private static final Logger log = LoggerFactory.getLogger(CfDeployController.class);
|
||||
|
||||
private final CfCliService cfCliService;
|
||||
private final ChunkedUploadService chunkedUploadService;
|
||||
private final AsyncDeploymentService asyncDeploymentService;
|
||||
private final ObjectMapper objectMapper;
|
||||
|
||||
@PostMapping(value = "/deploy", consumes = MediaType.MULTIPART_FORM_DATA_VALUE)
|
||||
@@ -118,6 +125,265 @@ public class CfDeployController {
|
||||
}
|
||||
}
|
||||
|
||||
// Chunked upload endpoints
|
||||
@PostMapping("/upload/init")
|
||||
public ResponseEntity<ChunkUploadResponse> initUpload(@RequestBody String requestJson) {
|
||||
try {
|
||||
log.info("Initializing chunked upload session");
|
||||
|
||||
// Validate the request JSON
|
||||
CfDeployRequest request = objectMapper.readValue(requestJson, CfDeployRequest.class);
|
||||
log.info("Creating upload session for app: {}", request.getAppName());
|
||||
|
||||
String sessionId = chunkedUploadService.createUploadSession(requestJson);
|
||||
|
||||
return ResponseEntity.ok(ChunkUploadResponse.builder()
|
||||
.success(true)
|
||||
.uploadSessionId(sessionId)
|
||||
.message("Upload session created successfully")
|
||||
.build());
|
||||
} catch (Exception e) {
|
||||
log.error("Error initializing upload session", e);
|
||||
return ResponseEntity.status(HttpStatus.BAD_REQUEST)
|
||||
.body(ChunkUploadResponse.failure("Failed to initialize upload: " + e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping("/upload/chunk")
|
||||
public ResponseEntity<ChunkUploadResponse> uploadChunk(
|
||||
@RequestParam("uploadSessionId") String uploadSessionId,
|
||||
@RequestParam("fileType") String fileType,
|
||||
@RequestParam("chunkIndex") Integer chunkIndex,
|
||||
@RequestParam("totalChunks") Integer totalChunks,
|
||||
@RequestParam(value = "fileName", required = false) String fileName,
|
||||
@RequestPart("chunk") MultipartFile chunk) {
|
||||
try {
|
||||
log.debug("Receiving chunk {}/{} for session: {}, fileType: {}",
|
||||
chunkIndex + 1, totalChunks, uploadSessionId, fileType);
|
||||
|
||||
// Validate file type
|
||||
if (!fileType.equals("jarFile") && !fileType.equals("manifest")) {
|
||||
throw new IllegalArgumentException("Invalid file type. Must be 'jarFile' or 'manifest'");
|
||||
}
|
||||
|
||||
chunkedUploadService.uploadChunk(uploadSessionId, fileType, fileName,
|
||||
chunkIndex, totalChunks, chunk);
|
||||
|
||||
var session = chunkedUploadService.getSession(uploadSessionId);
|
||||
var fileState = session.getFileStates().get(fileType);
|
||||
|
||||
return ResponseEntity.ok(ChunkUploadResponse.success(
|
||||
uploadSessionId, fileType, chunkIndex, totalChunks,
|
||||
fileState.getReceivedChunkCount()));
|
||||
} catch (Exception e) {
|
||||
log.error("Error uploading chunk", e);
|
||||
return ResponseEntity.status(HttpStatus.BAD_REQUEST)
|
||||
.body(ChunkUploadResponse.failure("Failed to upload chunk: " + e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping(value = "/upload/chunk", consumes = "application/octet-stream")
|
||||
public ResponseEntity<ChunkUploadResponse> uploadChunkRaw(
|
||||
@RequestParam("uploadSessionId") String uploadSessionId,
|
||||
@RequestParam("fileType") String fileType,
|
||||
@RequestParam("chunkIndex") Integer chunkIndex,
|
||||
@RequestParam("totalChunks") Integer totalChunks,
|
||||
@RequestParam(value = "fileName", required = false) String fileName,
|
||||
@RequestBody byte[] chunkData) {
|
||||
try {
|
||||
log.debug("Receiving raw chunk {}/{} for session: {}, fileType: {} ({} bytes)",
|
||||
chunkIndex + 1, totalChunks, uploadSessionId, fileType, chunkData.length);
|
||||
|
||||
// Validate file type
|
||||
if (!fileType.equals("jarFile") && !fileType.equals("manifest")) {
|
||||
throw new IllegalArgumentException("Invalid file type. Must be 'jarFile' or 'manifest'");
|
||||
}
|
||||
|
||||
chunkedUploadService.uploadChunkRaw(uploadSessionId, fileType, fileName,
|
||||
chunkIndex, totalChunks, chunkData);
|
||||
|
||||
var session = chunkedUploadService.getSession(uploadSessionId);
|
||||
var fileState = session.getFileStates().get(fileType);
|
||||
|
||||
return ResponseEntity.ok(ChunkUploadResponse.success(
|
||||
uploadSessionId, fileType, chunkIndex, totalChunks,
|
||||
fileState.getReceivedChunkCount()));
|
||||
} catch (Exception e) {
|
||||
log.error("Error uploading raw chunk", e);
|
||||
return ResponseEntity.status(HttpStatus.BAD_REQUEST)
|
||||
.body(ChunkUploadResponse.failure("Failed to upload chunk: " + e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping(value = "/upload/chunk", consumes = "text/plain")
|
||||
public ResponseEntity<ChunkUploadResponse> uploadChunkBase64(
|
||||
@RequestParam("uploadSessionId") String uploadSessionId,
|
||||
@RequestParam("fileType") String fileType,
|
||||
@RequestParam("chunkIndex") Integer chunkIndex,
|
||||
@RequestParam("totalChunks") Integer totalChunks,
|
||||
@RequestParam(value = "fileName", required = false) String fileName,
|
||||
@RequestHeader(value = "X-Chunk-Encoding", required = false) String encoding,
|
||||
@RequestBody String chunkDataBase64) {
|
||||
try {
|
||||
log.debug("Receiving base64 chunk {}/{} for session: {}, fileType: {}",
|
||||
chunkIndex + 1, totalChunks, uploadSessionId, fileType);
|
||||
|
||||
// Validate file type
|
||||
if (!fileType.equals("jarFile") && !fileType.equals("manifest")) {
|
||||
throw new IllegalArgumentException("Invalid file type. Must be 'jarFile' or 'manifest'");
|
||||
}
|
||||
|
||||
// Decode base64 to binary
|
||||
byte[] chunkData;
|
||||
if ("base64".equalsIgnoreCase(encoding)) {
|
||||
chunkData = java.util.Base64.getDecoder().decode(chunkDataBase64);
|
||||
log.debug("Decoded base64 chunk: {} chars -> {} bytes", chunkDataBase64.length(), chunkData.length);
|
||||
} else {
|
||||
// Fallback: treat as raw bytes from string
|
||||
chunkData = chunkDataBase64.getBytes(java.nio.charset.StandardCharsets.ISO_8859_1);
|
||||
}
|
||||
|
||||
chunkedUploadService.uploadChunkRaw(uploadSessionId, fileType, fileName,
|
||||
chunkIndex, totalChunks, chunkData);
|
||||
|
||||
var session = chunkedUploadService.getSession(uploadSessionId);
|
||||
var fileState = session.getFileStates().get(fileType);
|
||||
|
||||
return ResponseEntity.ok(ChunkUploadResponse.success(
|
||||
uploadSessionId, fileType, chunkIndex, totalChunks,
|
||||
fileState.getReceivedChunkCount()));
|
||||
} catch (Exception e) {
|
||||
log.error("Error uploading base64 chunk", e);
|
||||
return ResponseEntity.status(HttpStatus.BAD_REQUEST)
|
||||
.body(ChunkUploadResponse.failure("Failed to upload chunk: " + e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping("/upload/finalize")
|
||||
public ResponseEntity<DeploymentStatus> finalizeUpload(
|
||||
@RequestParam("uploadSessionId") String uploadSessionId,
|
||||
@RequestParam(value = "async", defaultValue = "true") boolean async) {
|
||||
try {
|
||||
log.info("Finalizing upload for session: {} (async={})", uploadSessionId, async);
|
||||
|
||||
if (!chunkedUploadService.isSessionReady(uploadSessionId)) {
|
||||
return ResponseEntity.status(HttpStatus.BAD_REQUEST)
|
||||
.body(DeploymentStatus.builder()
|
||||
.uploadSessionId(uploadSessionId)
|
||||
.status(DeploymentStatus.Status.FAILED)
|
||||
.message("Upload incomplete. Not all file chunks received.")
|
||||
.build());
|
||||
}
|
||||
|
||||
var session = chunkedUploadService.getSession(uploadSessionId);
|
||||
if (session == null) {
|
||||
return ResponseEntity.status(HttpStatus.NOT_FOUND)
|
||||
.body(DeploymentStatus.builder()
|
||||
.uploadSessionId(uploadSessionId)
|
||||
.status(DeploymentStatus.Status.FAILED)
|
||||
.message("Upload session not found or expired")
|
||||
.build());
|
||||
}
|
||||
|
||||
CfDeployRequest request = objectMapper.readValue(session.getRequestJson(), CfDeployRequest.class);
|
||||
log.info("Starting deployment for app: {} from session: {}",
|
||||
request.getAppName(), uploadSessionId);
|
||||
|
||||
// Get file paths from session
|
||||
var jarState = session.getFileStates().get("jarFile");
|
||||
var manifestState = session.getFileStates().get("manifest");
|
||||
|
||||
if (async) {
|
||||
// Start async deployment - returns immediately
|
||||
asyncDeploymentService.deployAsync(uploadSessionId, request,
|
||||
jarState.getTargetPath(), manifestState.getTargetPath());
|
||||
|
||||
return ResponseEntity.accepted().body(DeploymentStatus.builder()
|
||||
.uploadSessionId(uploadSessionId)
|
||||
.status(DeploymentStatus.Status.IN_PROGRESS)
|
||||
.message("Deployment started. Use /deployment/status endpoint to check progress.")
|
||||
.progress(0)
|
||||
.build());
|
||||
} else {
|
||||
// Synchronous deployment - waits for completion (may timeout!)
|
||||
CfDeployResponse response = cfCliService.deployApplicationFromPaths(
|
||||
request,
|
||||
jarState.getTargetPath(),
|
||||
manifestState.getTargetPath());
|
||||
|
||||
// Clean up session after deployment
|
||||
chunkedUploadService.deleteSession(uploadSessionId);
|
||||
|
||||
if (Boolean.TRUE.equals(response.getSuccess())) {
|
||||
return ResponseEntity.ok(DeploymentStatus.builder()
|
||||
.uploadSessionId(uploadSessionId)
|
||||
.status(DeploymentStatus.Status.COMPLETED)
|
||||
.message(response.getMessage())
|
||||
.output(response.getOutput())
|
||||
.progress(100)
|
||||
.build());
|
||||
} else {
|
||||
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
|
||||
.body(DeploymentStatus.builder()
|
||||
.uploadSessionId(uploadSessionId)
|
||||
.status(DeploymentStatus.Status.FAILED)
|
||||
.message(response.getMessage())
|
||||
.error(response.getError())
|
||||
.build());
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error("Error finalizing upload", e);
|
||||
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
|
||||
.body(DeploymentStatus.builder()
|
||||
.uploadSessionId(uploadSessionId)
|
||||
.status(DeploymentStatus.Status.FAILED)
|
||||
.message("Failed to finalize deployment: " + e.getMessage())
|
||||
.error(e.toString())
|
||||
.build());
|
||||
}
|
||||
}
|
||||
|
||||
@GetMapping("/deployment/status/{uploadSessionId}")
|
||||
public ResponseEntity<DeploymentStatus> getDeploymentStatus(@PathVariable String uploadSessionId) {
|
||||
try {
|
||||
DeploymentStatus status = asyncDeploymentService.getDeploymentStatus(uploadSessionId);
|
||||
|
||||
// Clean up session and deployment status if completed or failed
|
||||
if (status.getStatus() == DeploymentStatus.Status.COMPLETED ||
|
||||
status.getStatus() == DeploymentStatus.Status.FAILED) {
|
||||
chunkedUploadService.deleteSession(uploadSessionId);
|
||||
}
|
||||
|
||||
return ResponseEntity.ok(status);
|
||||
} catch (Exception e) {
|
||||
log.error("Error getting deployment status", e);
|
||||
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
|
||||
.body(DeploymentStatus.builder()
|
||||
.uploadSessionId(uploadSessionId)
|
||||
.status(DeploymentStatus.Status.FAILED)
|
||||
.message("Failed to get deployment status: " + e.getMessage())
|
||||
.build());
|
||||
}
|
||||
}
|
||||
|
||||
@GetMapping("/upload/status/{uploadSessionId}")
|
||||
public ResponseEntity<?> getUploadStatus(@PathVariable String uploadSessionId) {
|
||||
try {
|
||||
var session = chunkedUploadService.getSession(uploadSessionId);
|
||||
if (session == null) {
|
||||
return ResponseEntity.status(HttpStatus.NOT_FOUND)
|
||||
.body("Upload session not found or expired");
|
||||
}
|
||||
|
||||
return ResponseEntity.ok(session.getFileStates());
|
||||
} catch (Exception e) {
|
||||
log.error("Error getting upload status", e);
|
||||
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
|
||||
.body("Failed to get upload status: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private void validateFiles(MultipartFile jarFile, MultipartFile manifest) {
|
||||
if (jarFile.isEmpty()) {
|
||||
throw new IllegalArgumentException("JAR file is empty");
|
||||
|
||||
26
src/main/java/com/cfdeployer/model/ChunkUploadRequest.java
Normal file
26
src/main/java/com/cfdeployer/model/ChunkUploadRequest.java
Normal file
@@ -0,0 +1,26 @@
|
||||
package com.cfdeployer.model;
|
||||
|
||||
import jakarta.validation.constraints.Min;
|
||||
import jakarta.validation.constraints.NotBlank;
|
||||
import jakarta.validation.constraints.NotNull;
|
||||
import lombok.Data;
|
||||
|
||||
@Data
|
||||
public class ChunkUploadRequest {
|
||||
|
||||
@NotBlank(message = "Upload session ID is required")
|
||||
private String uploadSessionId;
|
||||
|
||||
@NotBlank(message = "File type is required (jarFile or manifest)")
|
||||
private String fileType; // "jarFile" or "manifest"
|
||||
|
||||
@NotNull(message = "Chunk index is required")
|
||||
@Min(value = 0, message = "Chunk index must be non-negative")
|
||||
private Integer chunkIndex;
|
||||
|
||||
@NotNull(message = "Total chunks is required")
|
||||
@Min(value = 1, message = "Total chunks must be at least 1")
|
||||
private Integer totalChunks;
|
||||
|
||||
private String fileName;
|
||||
}
|
||||
41
src/main/java/com/cfdeployer/model/ChunkUploadResponse.java
Normal file
41
src/main/java/com/cfdeployer/model/ChunkUploadResponse.java
Normal file
@@ -0,0 +1,41 @@
|
||||
package com.cfdeployer.model;
|
||||
|
||||
import lombok.AllArgsConstructor;
|
||||
import lombok.Builder;
|
||||
import lombok.Data;
|
||||
import lombok.NoArgsConstructor;
|
||||
|
||||
@Data
|
||||
@Builder
|
||||
@NoArgsConstructor
|
||||
@AllArgsConstructor
|
||||
public class ChunkUploadResponse {
|
||||
|
||||
private Boolean success;
|
||||
private String uploadSessionId;
|
||||
private String fileType;
|
||||
private Integer chunkIndex;
|
||||
private Integer totalChunks;
|
||||
private Integer receivedChunks;
|
||||
private String message;
|
||||
|
||||
public static ChunkUploadResponse success(String uploadSessionId, String fileType,
|
||||
Integer chunkIndex, Integer totalChunks, Integer receivedChunks) {
|
||||
return ChunkUploadResponse.builder()
|
||||
.success(true)
|
||||
.uploadSessionId(uploadSessionId)
|
||||
.fileType(fileType)
|
||||
.chunkIndex(chunkIndex)
|
||||
.totalChunks(totalChunks)
|
||||
.receivedChunks(receivedChunks)
|
||||
.message("Chunk uploaded successfully")
|
||||
.build();
|
||||
}
|
||||
|
||||
public static ChunkUploadResponse failure(String message) {
|
||||
return ChunkUploadResponse.builder()
|
||||
.success(false)
|
||||
.message(message)
|
||||
.build();
|
||||
}
|
||||
}
|
||||
27
src/main/java/com/cfdeployer/model/DeploymentStatus.java
Normal file
27
src/main/java/com/cfdeployer/model/DeploymentStatus.java
Normal file
@@ -0,0 +1,27 @@
|
||||
package com.cfdeployer.model;
|
||||
|
||||
import lombok.AllArgsConstructor;
|
||||
import lombok.Builder;
|
||||
import lombok.Data;
|
||||
import lombok.NoArgsConstructor;
|
||||
|
||||
@Data
|
||||
@Builder
|
||||
@NoArgsConstructor
|
||||
@AllArgsConstructor
|
||||
public class DeploymentStatus {
|
||||
|
||||
public enum Status {
|
||||
PENDING, // Upload complete, deployment queued
|
||||
IN_PROGRESS, // Currently deploying
|
||||
COMPLETED, // Deployment successful
|
||||
FAILED // Deployment failed
|
||||
}
|
||||
|
||||
private String uploadSessionId;
|
||||
private Status status;
|
||||
private String message;
|
||||
private String output;
|
||||
private String error;
|
||||
private Integer progress; // 0-100
|
||||
}
|
||||
61
src/main/java/com/cfdeployer/model/UploadSession.java
Normal file
61
src/main/java/com/cfdeployer/model/UploadSession.java
Normal file
@@ -0,0 +1,61 @@
|
||||
package com.cfdeployer.model;
|
||||
|
||||
import lombok.Data;
|
||||
|
||||
import java.nio.file.Path;
|
||||
import java.time.LocalDateTime;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
@Data
|
||||
public class UploadSession {
|
||||
|
||||
private String sessionId;
|
||||
private String requestJson;
|
||||
private Path workingDirectory;
|
||||
private LocalDateTime createdAt;
|
||||
private LocalDateTime lastAccessedAt;
|
||||
|
||||
// File type -> chunk tracking
|
||||
private Map<String, FileUploadState> fileStates;
|
||||
|
||||
public UploadSession(String sessionId, String requestJson, Path workingDirectory) {
|
||||
this.sessionId = sessionId;
|
||||
this.requestJson = requestJson;
|
||||
this.workingDirectory = workingDirectory;
|
||||
this.createdAt = LocalDateTime.now();
|
||||
this.lastAccessedAt = LocalDateTime.now();
|
||||
this.fileStates = new ConcurrentHashMap<>();
|
||||
}
|
||||
|
||||
public void updateLastAccessed() {
|
||||
this.lastAccessedAt = LocalDateTime.now();
|
||||
}
|
||||
|
||||
@Data
|
||||
public static class FileUploadState {
|
||||
private String fileName;
|
||||
private int totalChunks;
|
||||
private Map<Integer, Boolean> receivedChunks;
|
||||
private Path targetPath;
|
||||
|
||||
public FileUploadState(String fileName, int totalChunks, Path targetPath) {
|
||||
this.fileName = fileName;
|
||||
this.totalChunks = totalChunks;
|
||||
this.targetPath = targetPath;
|
||||
this.receivedChunks = new ConcurrentHashMap<>();
|
||||
}
|
||||
|
||||
public void markChunkReceived(int chunkIndex) {
|
||||
receivedChunks.put(chunkIndex, true);
|
||||
}
|
||||
|
||||
public boolean isComplete() {
|
||||
return receivedChunks.size() == totalChunks;
|
||||
}
|
||||
|
||||
public int getReceivedChunkCount() {
|
||||
return receivedChunks.size();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,99 @@
|
||||
package com.cfdeployer.service;
|
||||
|
||||
import com.cfdeployer.model.CfDeployRequest;
|
||||
import com.cfdeployer.model.CfDeployResponse;
|
||||
import com.cfdeployer.model.DeploymentStatus;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.springframework.scheduling.annotation.Async;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.nio.file.Path;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
@Service
|
||||
@RequiredArgsConstructor
|
||||
@Slf4j
|
||||
public class AsyncDeploymentService {
|
||||
|
||||
private final CfCliService cfCliService;
|
||||
private final Map<String, DeploymentStatus> deploymentStatuses = new ConcurrentHashMap<>();
|
||||
|
||||
@Async
|
||||
public void deployAsync(String sessionId, CfDeployRequest request, Path jarPath, Path manifestPath) {
|
||||
log.info("Starting async deployment for session: {}", sessionId);
|
||||
|
||||
// Set initial status
|
||||
deploymentStatuses.put(sessionId, DeploymentStatus.builder()
|
||||
.uploadSessionId(sessionId)
|
||||
.status(DeploymentStatus.Status.IN_PROGRESS)
|
||||
.message("Deployment in progress...")
|
||||
.progress(0)
|
||||
.build());
|
||||
|
||||
try {
|
||||
// Update progress
|
||||
updateProgress(sessionId, 10, "Logging into Cloud Foundry...");
|
||||
|
||||
CfDeployResponse response = cfCliService.deployApplicationFromPaths(request, jarPath, manifestPath);
|
||||
|
||||
updateProgress(sessionId, 100, "Deployment completed");
|
||||
|
||||
// Set final status
|
||||
if (Boolean.TRUE.equals(response.getSuccess())) {
|
||||
deploymentStatuses.put(sessionId, DeploymentStatus.builder()
|
||||
.uploadSessionId(sessionId)
|
||||
.status(DeploymentStatus.Status.COMPLETED)
|
||||
.message(response.getMessage())
|
||||
.output(response.getOutput())
|
||||
.progress(100)
|
||||
.build());
|
||||
} else {
|
||||
deploymentStatuses.put(sessionId, DeploymentStatus.builder()
|
||||
.uploadSessionId(sessionId)
|
||||
.status(DeploymentStatus.Status.FAILED)
|
||||
.message(response.getMessage())
|
||||
.error(response.getError())
|
||||
.progress(0)
|
||||
.build());
|
||||
}
|
||||
|
||||
} catch (Exception e) {
|
||||
log.error("Async deployment failed for session: {}", sessionId, e);
|
||||
deploymentStatuses.put(sessionId, DeploymentStatus.builder()
|
||||
.uploadSessionId(sessionId)
|
||||
.status(DeploymentStatus.Status.FAILED)
|
||||
.message("Deployment failed: " + e.getMessage())
|
||||
.error(e.toString())
|
||||
.progress(0)
|
||||
.build());
|
||||
}
|
||||
}
|
||||
|
||||
public DeploymentStatus getDeploymentStatus(String sessionId) {
|
||||
DeploymentStatus status = deploymentStatuses.get(sessionId);
|
||||
if (status == null) {
|
||||
return DeploymentStatus.builder()
|
||||
.uploadSessionId(sessionId)
|
||||
.status(DeploymentStatus.Status.PENDING)
|
||||
.message("No deployment found for this session")
|
||||
.build();
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
public void clearDeploymentStatus(String sessionId) {
|
||||
deploymentStatuses.remove(sessionId);
|
||||
log.debug("Cleared deployment status for session: {}", sessionId);
|
||||
}
|
||||
|
||||
private void updateProgress(String sessionId, int progress, String message) {
|
||||
DeploymentStatus current = deploymentStatuses.get(sessionId);
|
||||
if (current != null) {
|
||||
current.setProgress(progress);
|
||||
current.setMessage(message);
|
||||
log.info("Session {}: {} ({}%)", sessionId, message, progress);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -84,6 +84,41 @@ public class CfCliService {
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Deploys an application to Cloud Foundry from files already on local disk
 * (e.g. a JAR and manifest reassembled by the chunked-upload flow).
 *
 * @param request      deployment target (API endpoint, org, space, app name) and credentials
 * @param jarPath      path to the application JAR on local disk
 * @param manifestPath path to the CF manifest on local disk
 * @return a success response carrying the accumulated CLI output, or a failure
 *         response carrying the error message — this method never throws
 */
public CfDeployResponse deployApplicationFromPaths(CfDeployRequest request, Path jarPath, Path manifestPath) {
    try {
        log.info("=== Starting deployment from paths for app: {} ===", request.getAppName());
        log.info("Target: {}/{}/{}", request.getApiEndpoint(), request.getOrganization(), request.getSpace());
        log.info("JAR path: {}", jarPath);
        log.info("Manifest path: {}", manifestPath);

        // Validate files exist — fail fast with a clear message before
        // spending time invoking the CF CLI.
        if (!Files.exists(jarPath)) {
            throw new IOException("JAR file not found at: " + jarPath);
        }
        if (!Files.exists(manifestPath)) {
            throw new IOException("Manifest file not found at: " + manifestPath);
        }

        log.info("JAR file size: {} bytes", Files.size(jarPath));
        log.info("Manifest file size: {} bytes", Files.size(manifestPath));

        // Collects stdout/stderr from every CLI step for the response payload.
        StringBuilder output = new StringBuilder();

        // NOTE(review): logout is not in a finally block, so a failed login or
        // push leaves the CLI session logged in — confirm whether that is intended.
        login(request, output);
        pushApplication(request, manifestPath.getParent(), jarPath, output);
        logout(output);

        log.info("=== Deployment completed successfully for app: {} ===", request.getAppName());
        return CfDeployResponse.success(output.toString());

    } catch (Exception e) {
        // Any step failing (validation, login, push, logout) lands here and is
        // converted into a failure response rather than propagating.
        log.error("=== Deployment failed for app: {} ===", request.getAppName());
        log.error("Error type: {}", e.getClass().getName());
        log.error("Error message: {}", e.getMessage(), e);
        return CfDeployResponse.failure(e.getMessage(), e.toString());
    }
}
|
||||
|
||||
private void login(CfDeployRequest request, StringBuilder output) throws Exception {
|
||||
log.info("Logging into Cloud Foundry at: {}", request.getApiEndpoint());
|
||||
|
||||
@@ -224,7 +259,8 @@ public class CfCliService {
|
||||
log.info("Created temp file: {}", tempFile.getAbsolutePath());
|
||||
|
||||
// Copy from direct file path to temp file
|
||||
long bytesCopied = Files.copy(directPath, tempFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
|
||||
Files.copy(directPath, tempFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
|
||||
long bytesCopied = Files.size(tempFile.toPath());
|
||||
log.info("Copied CF CLI to temp file: {} ({} bytes)", tempFile.getAbsolutePath(), bytesCopied);
|
||||
|
||||
if (bytesCopied == 0) {
|
||||
|
||||
230
src/main/java/com/cfdeployer/service/ChunkedUploadService.java
Normal file
230
src/main/java/com/cfdeployer/service/ChunkedUploadService.java
Normal file
@@ -0,0 +1,230 @@
|
||||
package com.cfdeployer.service;
|
||||
|
||||
import com.cfdeployer.model.UploadSession;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.scheduling.annotation.Scheduled;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.web.multipart.MultipartFile;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.RandomAccessFile;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.time.LocalDateTime;
|
||||
import java.util.Comparator;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
@Service
|
||||
@Slf4j
|
||||
public class ChunkedUploadService {
|
||||
|
||||
@Value("${cf.upload.session.timeout-minutes:30}")
|
||||
private int sessionTimeoutMinutes;
|
||||
|
||||
private final Map<String, UploadSession> activeSessions = new ConcurrentHashMap<>();
|
||||
|
||||
public String createUploadSession(String requestJson) throws IOException {
|
||||
String sessionId = UUID.randomUUID().toString();
|
||||
Path workingDir = Files.createTempDirectory("cf-upload-" + sessionId);
|
||||
|
||||
UploadSession session = new UploadSession(sessionId, requestJson, workingDir);
|
||||
activeSessions.put(sessionId, session);
|
||||
|
||||
log.info("Created upload session: {} at {}", sessionId, workingDir);
|
||||
return sessionId;
|
||||
}
|
||||
|
||||
public synchronized void uploadChunk(String sessionId, String fileType, String fileName,
|
||||
int chunkIndex, int totalChunks, MultipartFile chunk) throws IOException {
|
||||
UploadSession session = activeSessions.get(sessionId);
|
||||
if (session == null) {
|
||||
throw new IllegalArgumentException("Upload session not found or expired: " + sessionId);
|
||||
}
|
||||
|
||||
session.updateLastAccessed();
|
||||
|
||||
// Get or create file upload state
|
||||
UploadSession.FileUploadState fileState = session.getFileStates()
|
||||
.computeIfAbsent(fileType, k -> {
|
||||
String targetFileName = fileType.equals("manifest") ? "manifest.yml" : fileName;
|
||||
Path targetPath = session.getWorkingDirectory().resolve(targetFileName);
|
||||
return new UploadSession.FileUploadState(fileName, totalChunks, targetPath);
|
||||
});
|
||||
|
||||
// Validate total chunks consistency
|
||||
if (fileState.getTotalChunks() != totalChunks) {
|
||||
throw new IllegalArgumentException(
|
||||
String.format("Total chunks mismatch for %s: expected %d, got %d",
|
||||
fileType, fileState.getTotalChunks(), totalChunks));
|
||||
}
|
||||
|
||||
// Write chunk to file using sequential append mode
|
||||
// This supports variable chunk sizes - chunks MUST be uploaded in order (0, 1, 2, ...)
|
||||
Path targetPath = fileState.getTargetPath();
|
||||
|
||||
// Verify chunks are uploaded in order
|
||||
if (chunkIndex != fileState.getReceivedChunkCount()) {
|
||||
throw new IllegalArgumentException(
|
||||
String.format("Chunks must be uploaded in order. Expected chunk %d but received %d",
|
||||
fileState.getReceivedChunkCount(), chunkIndex));
|
||||
}
|
||||
|
||||
try (var inputStream = chunk.getInputStream();
|
||||
var outputStream = Files.newOutputStream(targetPath,
|
||||
java.nio.file.StandardOpenOption.CREATE,
|
||||
java.nio.file.StandardOpenOption.APPEND)) {
|
||||
|
||||
// Stream chunk data in smaller buffers to reduce memory pressure
|
||||
byte[] buffer = new byte[8192]; // 8KB buffer
|
||||
int bytesRead;
|
||||
long totalWritten = 0;
|
||||
|
||||
while ((bytesRead = inputStream.read(buffer)) != -1) {
|
||||
outputStream.write(buffer, 0, bytesRead);
|
||||
totalWritten += bytesRead;
|
||||
}
|
||||
|
||||
log.debug("Appended chunk {} ({} bytes) to {}",
|
||||
chunkIndex, totalWritten, targetPath.getFileName());
|
||||
}
|
||||
|
||||
fileState.markChunkReceived(chunkIndex);
|
||||
log.info("Session {}: Received chunk {}/{} for {} ({} bytes)",
|
||||
sessionId, chunkIndex + 1, totalChunks, fileType, chunk.getSize());
|
||||
|
||||
if (fileState.isComplete()) {
|
||||
log.info("Session {}: File {} upload completed ({} chunks)",
|
||||
sessionId, fileType, totalChunks);
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized void uploadChunkRaw(String sessionId, String fileType, String fileName,
|
||||
int chunkIndex, int totalChunks, byte[] chunkData) throws IOException {
|
||||
UploadSession session = activeSessions.get(sessionId);
|
||||
if (session == null) {
|
||||
throw new IllegalArgumentException("Upload session not found or expired: " + sessionId);
|
||||
}
|
||||
|
||||
session.updateLastAccessed();
|
||||
|
||||
// Get or create file upload state
|
||||
UploadSession.FileUploadState fileState = session.getFileStates()
|
||||
.computeIfAbsent(fileType, k -> {
|
||||
String targetFileName = fileType.equals("manifest") ? "manifest.yml" : fileName;
|
||||
Path targetPath = session.getWorkingDirectory().resolve(targetFileName);
|
||||
return new UploadSession.FileUploadState(fileName, totalChunks, targetPath);
|
||||
});
|
||||
|
||||
// Validate total chunks consistency
|
||||
if (fileState.getTotalChunks() != totalChunks) {
|
||||
throw new IllegalArgumentException(
|
||||
String.format("Total chunks mismatch for %s: expected %d, got %d",
|
||||
fileType, fileState.getTotalChunks(), totalChunks));
|
||||
}
|
||||
|
||||
// Write chunk to file using sequential append mode
|
||||
// This supports variable chunk sizes - chunks MUST be uploaded in order (0, 1, 2, ...)
|
||||
Path targetPath = fileState.getTargetPath();
|
||||
|
||||
// Verify chunks are uploaded in order
|
||||
if (chunkIndex != fileState.getReceivedChunkCount()) {
|
||||
throw new IllegalArgumentException(
|
||||
String.format("Chunks must be uploaded in order. Expected chunk %d but received %d",
|
||||
fileState.getReceivedChunkCount(), chunkIndex));
|
||||
}
|
||||
|
||||
try (var outputStream = Files.newOutputStream(targetPath,
|
||||
java.nio.file.StandardOpenOption.CREATE,
|
||||
java.nio.file.StandardOpenOption.APPEND)) {
|
||||
|
||||
// Write raw byte array directly
|
||||
outputStream.write(chunkData);
|
||||
|
||||
log.debug("Appended raw chunk {} ({} bytes) to {}",
|
||||
chunkIndex, chunkData.length, targetPath.getFileName());
|
||||
}
|
||||
|
||||
fileState.markChunkReceived(chunkIndex);
|
||||
log.info("Session {}: Received raw chunk {}/{} for {} ({} bytes)",
|
||||
sessionId, chunkIndex + 1, totalChunks, fileType, chunkData.length);
|
||||
|
||||
if (fileState.isComplete()) {
|
||||
log.info("Session {}: File {} upload completed ({} chunks)",
|
||||
sessionId, fileType, totalChunks);
|
||||
}
|
||||
}
|
||||
|
||||
public UploadSession getSession(String sessionId) {
|
||||
UploadSession session = activeSessions.get(sessionId);
|
||||
if (session != null) {
|
||||
session.updateLastAccessed();
|
||||
}
|
||||
return session;
|
||||
}
|
||||
|
||||
public boolean isSessionReady(String sessionId) {
|
||||
UploadSession session = activeSessions.get(sessionId);
|
||||
if (session == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check if both jarFile and manifest are complete
|
||||
UploadSession.FileUploadState jarState = session.getFileStates().get("jarFile");
|
||||
UploadSession.FileUploadState manifestState = session.getFileStates().get("manifest");
|
||||
|
||||
return jarState != null && jarState.isComplete() &&
|
||||
manifestState != null && manifestState.isComplete();
|
||||
}
|
||||
|
||||
public void deleteSession(String sessionId) {
|
||||
UploadSession session = activeSessions.remove(sessionId);
|
||||
if (session != null) {
|
||||
cleanupSessionDirectory(session);
|
||||
log.info("Deleted upload session: {}", sessionId);
|
||||
}
|
||||
}
|
||||
|
||||
@Scheduled(fixedRate = 300000) // Run every 5 minutes
|
||||
public void cleanupExpiredSessions() {
|
||||
LocalDateTime expirationTime = LocalDateTime.now().minusMinutes(sessionTimeoutMinutes);
|
||||
int cleanedCount = 0;
|
||||
|
||||
for (Map.Entry<String, UploadSession> entry : activeSessions.entrySet()) {
|
||||
if (entry.getValue().getLastAccessedAt().isBefore(expirationTime)) {
|
||||
deleteSession(entry.getKey());
|
||||
cleanedCount++;
|
||||
}
|
||||
}
|
||||
|
||||
if (cleanedCount > 0) {
|
||||
log.info("Cleaned up {} expired upload sessions", cleanedCount);
|
||||
}
|
||||
}
|
||||
|
||||
private void cleanupSessionDirectory(UploadSession session) {
|
||||
try {
|
||||
Path workingDir = session.getWorkingDirectory();
|
||||
if (Files.exists(workingDir)) {
|
||||
Files.walk(workingDir)
|
||||
.sorted(Comparator.reverseOrder())
|
||||
.forEach(path -> {
|
||||
try {
|
||||
Files.delete(path);
|
||||
} catch (IOException e) {
|
||||
log.warn("Failed to delete file: {}", path, e);
|
||||
}
|
||||
});
|
||||
log.debug("Cleaned up session directory: {}", workingDir);
|
||||
}
|
||||
} catch (IOException e) {
|
||||
log.warn("Failed to clean up session directory for session: {}", session.getSessionId(), e);
|
||||
}
|
||||
}
|
||||
|
||||
public int getActiveSessionCount() {
|
||||
return activeSessions.size();
|
||||
}
|
||||
}
|
||||
@@ -4,15 +4,23 @@ server.port=8080
|
||||
# Application Name
|
||||
spring.application.name=cf-deployer
|
||||
|
||||
# Multipart Configuration
|
||||
spring.servlet.multipart.max-file-size=500MB
|
||||
spring.servlet.multipart.max-request-size=500MB
|
||||
# Multipart Configuration - for traditional single upload endpoint
|
||||
spring.servlet.multipart.max-file-size=10MB
|
||||
spring.servlet.multipart.max-request-size=10MB
|
||||
spring.servlet.multipart.enabled=true
|
||||
# Write all uploads directly to disk, not memory (prevents OutOfMemoryError)
|
||||
spring.servlet.multipart.file-size-threshold=0
|
||||
|
||||
# Cloud Foundry CLI Configuration
|
||||
cf.cli.timeout=600
|
||||
cf.cli.path=
|
||||
|
||||
# Chunked Upload Configuration
|
||||
# Session timeout in minutes (default: 30 minutes)
|
||||
cf.upload.session.timeout-minutes=30
|
||||
# Note: Chunk size is controlled by the client, not the server.
|
||||
# Server accepts any chunk size and appends chunks sequentially.
|
||||
|
||||
# Logging Configuration
|
||||
logging.level.root=INFO
|
||||
logging.level.com.cfdeployer=DEBUG
|
||||
|
||||
Reference in New Issue
Block a user