Add S3 Bucket Provisioner (#59)

Evan Cohen-Doty
2026-02-04 11:32:12 -08:00
parent dcd405679a
commit 1b706fe858
6 changed files with 258 additions and 0 deletions

View File

@@ -7,6 +7,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
### Added
- Added S3 bucket provisioning Terraform configuration (#59)
  - Creates an S3 bucket for general-purpose Orchard storage
  - Creates a logging bucket that captures access logs for the S3 bucket
- Added transparent PyPI proxy implementing PEP 503 Simple API (#108)
  - `GET /pypi/simple/` - package index (proxied from upstream)
  - `GET /pypi/simple/{package}/` - version list with rewritten download links

View File

@@ -0,0 +1,19 @@
{
  "name": "EC2 Provisioner Dev Container",
  "image": "registry.global.bsf.tools/esv/bsf/bsf-integration/dev-env-setup/provisioner_image:v0.18.1",
  "mounts": [
    "source=${localEnv:HOME}/.ssh,target=/home/user/.ssh,type=bind,consistency=cached",
    "source=${localEnv:HOME}/.okta,target=/home/user/.okta,type=bind,consistency=cached",
    "source=${localEnv:HOME}/.netrc,target=/home/user/.netrc,type=bind,consistency=cached"
  ],
  "forwardPorts": [
    8000
  ],
  "runArgs": [
    "--network=host"
  ],
  "containerUser": "ubuntu",
  "remoteUser": "ubuntu",
  "updateRemoteUserUID": true,
  "onCreateCommand": "sudo usermod -s /bin/bash ubuntu"
}

View File

@@ -0,0 +1,70 @@
data "aws_caller_identity" "current" {}
# Main S3 bucket policy to reject HTTPS requests
data "aws_iam_policy_document" "s3_reject_https_policy" {
statement {
sid = "s3RejectHTTPS"
effect = "Deny"
principals {
type = "*"
identifiers = ["*"]
}
actions = ["s3:*"]
resources = [
aws_s3_bucket.s3_bucket.arn,
"${aws_s3_bucket.s3_bucket.arn}/*",
]
condition {
test = "Bool"
variable = "aws:SecureTransport"
values = ["false"]
}
}
}
# Logging bucket policy to reject HTTPS requests and take logs
data "aws_iam_policy_document" "logging_bucket_policy" {
statement {
principals {
identifiers = ["logging.s3.amazonaws.com"]
type = "Service"
}
actions = ["s3:PutObject"]
resources = ["${aws_s3_bucket.logging.arn}/*"]
condition {
test = "StringEquals"
variable = "aws:SourceAccount"
values = [data.aws_caller_identity.current.account_id]
}
}
statement {
sid = "loggingRejectHTTPS"
effect = "Deny"
principals {
type = "*"
identifiers = ["*"]
}
actions = ["s3:*"]
resources = [
aws_s3_bucket.logging.arn,
"${aws_s3_bucket.logging.arn}/*"
]
condition {
test = "Bool"
variable = "aws:SecureTransport"
values = ["false"]
}
}
}
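These policy documents only render to JSON at plan time. One way to sanity-check the generated statements (illustrative only, not part of this commit) is a temporary output that exposes the rendered document:

# Illustrative debugging aid only, not part of this commit.
output "rendered_logging_bucket_policy" {
  value = data.aws_iam_policy_document.logging_bucket_policy.json
}

After `terraform apply`, `terraform output rendered_logging_bucket_policy` prints the JSON that gets attached to the logging bucket.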

View File

@@ -0,0 +1,12 @@
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 6.28"
    }
  }
}

provider "aws" {
  region = "us-gov-west-1"
}

View File

@@ -0,0 +1,137 @@
# Disable warnings about MFA delete and IAM Access Analyzer (currently cannot support them)
# kics-scan disable=c5b31ab9-0f26-4a49-b8aa-4cc064392f4d,e592a0c5-5bdb-414c-9066-5dba7cdea370

# Bucket to actually store artifacts
resource "aws_s3_bucket" "s3_bucket" {
  bucket = var.bucket
  tags = {
    Name        = "Orchard S3 Provisioning Bucket"
    Environment = var.environment
  }
}

# Block all public access
resource "aws_s3_bucket_public_access_block" "s3_bucket_public_access_block" {
  bucket                  = aws_s3_bucket.s3_bucket.id
  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true
}

/*
  Our lifecycle rule is as follows:
  - Standard storage
    -> One Zone-IA storage after 30 days
    -> Glacier storage after 180 days
*/
resource "aws_s3_bucket_lifecycle_configuration" "s3_bucket_lifecycle_configuration" {
  bucket = aws_s3_bucket.s3_bucket.id

  rule {
    id     = "Standard to OneZone"
    filter {}
    status = "Enabled"
    transition {
      days          = 30
      storage_class = "ONEZONE_IA"
    }
  }

  rule {
    id     = "OneZone to Glacier"
    filter {}
    status = "Enabled"
    transition {
      days          = 180
      storage_class = "GLACIER"
    }
  }
}

# Enable versioning (without MFA delete)
resource "aws_s3_bucket_versioning" "s3_bucket_versioning" {
  bucket = aws_s3_bucket.s3_bucket.id
  versioning_configuration {
    status = "Enabled"
  }
}

# Give preference to the bucket owner for uploaded objects
resource "aws_s3_bucket_ownership_controls" "s3_bucket_ownership_controls" {
  bucket = aws_s3_bucket.s3_bucket.id
  rule {
    object_ownership = "BucketOwnerPreferred"
  }
}

# Set the access control list to private
resource "aws_s3_bucket_acl" "s3_bucket_acl" {
  depends_on = [aws_s3_bucket_ownership_controls.s3_bucket_ownership_controls]
  bucket     = aws_s3_bucket.s3_bucket.id
  acl        = var.acl
}

# Bucket for access logs
resource "aws_s3_bucket" "logging" {
  bucket = "orchard-logging-bucket"
  tags = {
    Name        = "Orchard S3 Logging Bucket"
    Environment = var.environment
  }
}

# Versioning for the logging bucket
resource "aws_s3_bucket_versioning" "orchard_logging_bucket_versioning" {
  bucket = aws_s3_bucket.logging.id
  versioning_configuration {
    status = "Enabled"
  }
}

# Attach the policies to the main S3 bucket and the logging bucket
resource "aws_s3_bucket_policy" "s3_bucket_https_policy" {
  bucket = aws_s3_bucket.s3_bucket.id
  policy = data.aws_iam_policy_document.s3_reject_https_policy.json
}

resource "aws_s3_bucket_policy" "logging_policy" {
  bucket = aws_s3_bucket.logging.bucket
  policy = data.aws_iam_policy_document.logging_bucket_policy.json
}

# Deliver access logs for both buckets into separate prefixes of the logging bucket
resource "aws_s3_bucket_logging" "s3_bucket_logging" {
  bucket        = aws_s3_bucket.s3_bucket.bucket
  target_bucket = aws_s3_bucket.logging.bucket
  target_prefix = "s3_log/"
  target_object_key_format {
    partitioned_prefix {
      partition_date_source = "EventTime"
    }
  }
}

resource "aws_s3_bucket_logging" "logging_bucket_logging" {
  bucket        = aws_s3_bucket.logging.bucket
  target_bucket = aws_s3_bucket.logging.bucket
  target_prefix = "log/"
  target_object_key_format {
    partitioned_prefix {
      partition_date_source = "EventTime"
    }
  }
}
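None of the resources above are exposed as outputs; if downstream Orchard configuration needs to reference the buckets, a sketch along these lines (names are illustrative, not part of this commit) would make them available:

# Illustrative outputs, not part of this commit.
output "bucket_arn" {
  description = "ARN of the Orchard provisioning bucket"
  value       = aws_s3_bucket.s3_bucket.arn
}

output "logging_bucket_name" {
  description = "Name of the Orchard logging bucket"
  value       = aws_s3_bucket.logging.bucket
}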

View File

@@ -0,0 +1,17 @@
variable "bucket" {
description = "Name of the S3 bucket"
type = string
default = "orchard-provisioning-bucket"
}
variable "acl" {
description = "Access control list for the bucket"
type = string
default = "private"
}
variable "environment" {
description = "Environment of the bucket"
type = string
default = "Development"
}
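All three variables have defaults, so the configuration applies without extra input. To override them, a minimal terraform.tfvars sketch (values are illustrative, not part of this commit) could look like:

# terraform.tfvars -- illustrative values, not part of this commit
bucket      = "orchard-provisioning-bucket-prod"
environment = "Production"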