diff --git a/.devcontainer/README.md b/.devcontainer/README.md index bf6a524f218..a175a0a9dcc 100644 --- a/.devcontainer/README.md +++ b/.devcontainer/README.md @@ -1,14 +1,18 @@ # Dev Container +> [!NOTE] > This is a community supported feature -To assist in the development of `modernisation-platform-environments`, the community have built a [dev container](https://containers.dev/) with the required tooling +To assist with working on this repository, the community has configured a [dev container](https://containers.dev/) with the required tooling. -## Prerequisites +You can run this locally, or with [GitHub Codespaces](https://docs.github.com/en/codespaces/overview). -- GitHub Codespaces +## Locally -or +> [!WARNING] +> This has only been tested on macOS + +### Prerequisites - Docker @@ -16,21 +20,18 @@ or - Dev Containers Extention -## Running - -### GitHub Codespaces - -Launch from GitHub +To launch locally, ensure the prerequisites are met, and then click the button below -### Locally +[![Open in Dev Container](https://raw.githubusercontent.com/ministryofjustice/.devcontainer/refs/heads/main/contrib/badge.svg)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/ministryofjustice/modernisation-platform-environments) -1. Ensure prerequisites are met +## GitHub Codespaces -1. Clone repository +> [!IMPORTANT] +> GitHub Codespaces are not currently paid for by the Ministry of Justice and are subject to the quotas [here](https://docs.github.com/en/billing/managing-billing-for-your-products/managing-billing-for-github-codespaces/about-billing-for-github-codespaces#monthly-included-storage-and-core-hours-for-personal-accounts) -1. Open repository in Visual Studio Code +To launch a GitHub Codespace, click the button below -1. 
Reopen in container +[![Open in Codespace](https://github.com/codespaces/badge.svg)](https://codespaces.new/ministryofjustice/modernisation-platform-environments) ## Tools diff --git a/.github/workflows/awsnuke.yml b/.github/workflows/awsnuke.yml index d49ab888f49..49558eaa5b7 100644 --- a/.github/workflows/awsnuke.yml +++ b/.github/workflows/awsnuke.yml @@ -133,11 +133,11 @@ jobs: - name: Slack failure notification uses: slackapi/slack-github-action@485a9d42d3a73031f12ec201c457e2162c45d02d # v2.0.0 with: + webhook-type: incoming-webhook payload: | {"blocks":[{"type": "section","text": {"type": "mrkdwn","text": ":no_entry: Failed GitHub Action:"}},{"type": "section","fields":[{"type": "mrkdwn","text": "*Workflow:*\n<${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|${{ github.workflow }}>"},{"type": "mrkdwn","text": "*Job:*\n${{ github.job }}"},{"type": "mrkdwn","text": "*Repo:*\n${{ github.repository }}"}]}]} env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK if: ${{ failure() }} env: ACCOUNT_NAME: ${{ matrix.nuke_accts }} @@ -217,11 +217,11 @@ jobs: - name: Slack failure notification uses: slackapi/slack-github-action@485a9d42d3a73031f12ec201c457e2162c45d02d # v2.0.0 with: + webhook-type: incoming-webhook payload: | {"blocks":[{"type": "section","text": {"type": "mrkdwn","text": ":no_entry: Failed GitHub Action:"}},{"type": "section","fields":[{"type": "mrkdwn","text": "*Workflow:*\n<${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|${{ github.workflow }}>"},{"type": "mrkdwn","text": "*Job:*\n${{ github.job }}"},{"type": "mrkdwn","text": "*Repo:*\n${{ github.repository }}"}]}]} env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK if: ${{ failure() }} env: AWS_ACCESS_KEY_ID: ${{ secrets.TESTING_AWS_ACCESS_KEY_ID }} diff --git a/.github/workflows/code-scanning.yml b/.github/workflows/code-scanning.yml index a6642609d96..c726062c99f 100644 --- a/.github/workflows/code-scanning.yml +++ b/.github/workflows/code-scanning.yml @@ -38,7 +38,7 @@ jobs: run: tflint --disable-rule=terraform_unused_declarations --format sarif > tflint.sarif - name: Upload SARIF file if: success() || failure() - uses: github/codeql-action/upload-sarif@ea9e4e37992a54ee68a9622e985e60c8e8f12d9f # v3.27.4 + uses: github/codeql-action/upload-sarif@aa578102511db1f4524ed59b8cc2bae4f6e88195 # v3.27.6 with: sarif_file: tflint.sarif trivy: @@ -53,7 +53,7 @@ jobs: uses: actions/checkout@v4 - name: Run Trivy vulnerability scanner in repo mode - uses: aquasecurity/trivy-action@915b19bbe73b92a6cf82a1bc12b087c9a19a5fe2 + uses: aquasecurity/trivy-action@18f2510ee396bbf400402947b394f2dd8c87dbb0 with: scan-type: 'fs' scanners: misconfig,vuln,secret @@ -63,7 +63,7 @@ jobs: - name: Upload Trivy scan results to GitHub Security tab if: success() || failure() - uses: github/codeql-action/upload-sarif@ea9e4e37992a54ee68a9622e985e60c8e8f12d9f # v3.27.4 + uses: github/codeql-action/upload-sarif@aa578102511db1f4524ed59b8cc2bae4f6e88195 # v3.27.6 with: sarif_file: 'trivy-results.sarif' checkov: @@ -81,7 +81,7 @@ jobs: fetch-depth: 0 - name: Run Checkov action id: checkov - uses: bridgecrewio/checkov-action@6fe02213c515948c8da243a6554a3bff49129295 # v12.2912.0 + uses: bridgecrewio/checkov-action@f10397402800d31940c9cefd680c66688a516c9f # v12.2932.0 with: directory: ./ framework: terraform @@ -90,6 +90,6 @@ jobs: skip_check: CKV_GIT_1,CKV_AWS_126,CKV2_AWS_38,CKV2_AWS_39 - name: Upload SARIF 
file if: success() || failure() - uses: github/codeql-action/upload-sarif@ea9e4e37992a54ee68a9622e985e60c8e8f12d9f # v3.27.4 + uses: github/codeql-action/upload-sarif@aa578102511db1f4524ed59b8cc2bae4f6e88195 # v3.27.6 with: sarif_file: ./checkov.sarif diff --git a/.github/workflows/format-code.yml b/.github/workflows/format-code.yml index b1dea43397b..291666fbab0 100644 --- a/.github/workflows/format-code.yml +++ b/.github/workflows/format-code.yml @@ -40,7 +40,7 @@ jobs: id: ml # You can override MegaLinter flavor used to have faster performances # More info at https://megalinter.io/flavors/ - uses: oxsecurity/megalinter/flavors/terraform@d8c95fc6f2237031fb9e9322b0f97100168afa6e #v8.2.0 + uses: oxsecurity/megalinter/flavors/terraform@1fc052d03c7a43c78fe0fee19c9d648b749e0c01 #v8.3.0 env: # All available variables are described in documentation # https://megalinter.io/configuration/#shared-variables diff --git a/.github/workflows/nuke-redeploy.yml b/.github/workflows/nuke-redeploy.yml index 80c2bb6772c..c95fcb7965b 100644 --- a/.github/workflows/nuke-redeploy.yml +++ b/.github/workflows/nuke-redeploy.yml @@ -93,11 +93,11 @@ jobs: - name: Slack failure notification uses: slackapi/slack-github-action@485a9d42d3a73031f12ec201c457e2162c45d02d # v2.0.0 with: + webhook-type: incoming-webhook payload: | {"blocks":[{"type": "section","text": {"type": "mrkdwn","text": ":no_entry: Failed GitHub Action:"}},{"type": "section","fields":[{"type": "mrkdwn","text": "*Workflow:*\n<${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|${{ github.workflow }}>"},{"type": "mrkdwn","text": "*Job:*\n${{ github.job }}"},{"type": "mrkdwn","text": "*Repo:*\n${{ github.repository }}"}]}]} env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK if: ${{ failure() }} env: diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index c035475ffb6..0ef3ffd64bc 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -67,6 +67,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@ea9e4e37992a54ee68a9622e985e60c8e8f12d9f # v3.27.4 + uses: github/codeql-action/upload-sarif@aa578102511db1f4524ed59b8cc2bae4f6e88195 # v3.27.6 with: sarif_file: results.sarif diff --git a/terraform/environments/analytical-platform-compute/ec2-instances.tf b/terraform/environments/analytical-platform-compute/ec2-instances.tf new file mode 100644 index 00000000000..4104af0a722 --- /dev/null +++ b/terraform/environments/analytical-platform-compute/ec2-instances.tf @@ -0,0 +1,35 @@ +module "debug_instance" { + #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions + + source = "terraform-aws-modules/ec2-instance/aws" + version = "5.7.1" + + name = "network-debug" + ami = "ami-0e8d228ad90af673b" # Ubuntu Server 24.04 LTS + instance_type = "t3.micro" + subnet_id = element(module.vpc.private_subnets, 0) + vpc_security_group_ids = [module.debug_instance_security_group.security_group_id] + associate_public_ip_address = false + + root_block_device = [ + { + encrypted = true + volume_type = "gp3" + volume_size = 8 + } + ] + + create_iam_instance_profile = true + iam_role_policies = { + SSMCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } + + metadata_options = { + http_endpoint = "enabled" + http_put_response_hop_limit = 1 + http_tokens = "required" + instance_metadata_tags = "enabled" + } + + tags = local.tags +} diff --git a/terraform/environments/analytical-platform-compute/environment-configuration.tf b/terraform/environments/analytical-platform-compute/environment-configuration.tf index 574db8dde12..6009114ee9a 100644 --- a/terraform/environments/analytical-platform-compute/environment-configuration.tf +++ b/terraform/environments/analytical-platform-compute/environment-configuration.tf @@ -13,11 +13,7 @@ locals { vpc_single_nat_gateway = false /* Transit Gateway */ - transit_gateway_routes = [ - "10.26.0.0/15", # modernisation-platform - "10.40.0.0/18", # noms-live-vnet - "10.205.0.0/20" # laa-lz-prod - ] + transit_gateway_routes = ["10.0.0.0/8"] /* Route53 */ route53_zone = "compute.development.analytical-platform.service.justice.gov.uk" @@ -42,14 +38,12 @@ locals { /* MLFlow */ mlflow_s3_bucket_name = "alpha-analytical-platform-mlflow-development" - /* Observability Platform */ - observability_platform = "development" - /* QuickSight */ quicksight_notification_email = "analytical-platform@digital.justice.gov.uk" /* UI */ ui_hostname = "development.analytical-platform.service.justice.gov.uk" + } test = { /* VPC */ @@ -64,11 +58,7 @@ locals { vpc_single_nat_gateway = false /* Transit Gateway */ - transit_gateway_routes = [ - "10.26.0.0/15", # modernisation-platform - "10.40.0.0/18", # noms-live-vnet - "10.205.0.0/20" # laa-lz-prod - ] + transit_gateway_routes = ["10.0.0.0/8"] /* Route53 */ route53_zone = "compute.test.analytical-platform.service.justice.gov.uk" @@ -87,9 +77,6 @@ locals { vpc_cni = "v1.19.0-eksbuild.1" } - /* Observability Platform */ - observability_platform = "development" - /* Data Engineering Airflow */ data_engineering_airflow_execution_role_arn = "arn:aws:iam::${local.environment_management.account_ids["analytical-platform-data-production"]}:role/airflow-dev-execution-role" @@ -115,10 +102,7 @@ locals { vpc_single_nat_gateway = false /* Transit Gateway */ - transit_gateway_routes = [ - "10.26.0.0/15", # modernisation-platform - "10.40.0.0/18" # noms-live-vnet - ] + transit_gateway_routes = ["10.0.0.0/8"] /* Route53 */ route53_zone = 
"compute.analytical-platform.service.justice.gov.uk" @@ -143,14 +127,16 @@ locals { /* MLFlow */ mlflow_s3_bucket_name = "alpha-analytical-platform-mlflow" - /* Observability Platform */ - observability_platform = "production" - /* QuickSight */ quicksight_notification_email = "analytical-platform@digital.justice.gov.uk" /* UI */ ui_hostname = "analytical-platform.service.justice.gov.uk" + + /* LF Domain Tags */ + cadet_lf_tags = { + domain = ["bold", "civil", "courts", "general", "criminal_history", "development_sandpit", "electronic_monitoring", "finance", "interventions", "opg", "performance", "risk", "people", "prison", "probation", "staging", "victims", "victims_case_management"] # extracted from bucket paths + } } } } diff --git a/terraform/environments/analytical-platform-compute/helm-charts-actions-runners.tf b/terraform/environments/analytical-platform-compute/helm-charts-actions-runners.tf index 936a236ccb4..e37b997235b 100644 --- a/terraform/environments/analytical-platform-compute/helm-charts-actions-runners.tf +++ b/terraform/environments/analytical-platform-compute/helm-charts-actions-runners.tf @@ -174,3 +174,27 @@ resource "helm_release" "actions_runner_mojas_create_a_derived_table_emds_test" ) ] } + +resource "helm_release" "actions_runner_mojas_create_a_derived_table_emds" { + count = terraform.workspace == "analytical-platform-compute-production" ? 1 : 0 + + /* https://github.com/ministryofjustice/analytical-platform-actions-runner */ + name = "actions-runner-mojas-create-a-derived-table-emds" + repository = "oci://ghcr.io/ministryofjustice/analytical-platform-charts" + version = "2.320.0-4" + chart = "actions-runner" + namespace = kubernetes_namespace.actions_runners[0].metadata[0].name + values = [ + templatefile( + "${path.module}/src/helm/values/actions-runners/create-a-derived-table/values.yml.tftpl", + { + github_app_application_id = jsondecode(data.aws_secretsmanager_secret_version.actions_runners_token_apc_self_hosted_runners_github_app[0].secret_string)["app_id"] + github_app_installation_id = jsondecode(data.aws_secretsmanager_secret_version.actions_runners_token_apc_self_hosted_runners_github_app[0].secret_string)["installation_id"] + github_organisation = "moj-analytical-services" + github_repository = "create-a-derived-table" + github_runner_labels = "electronic-monitoring-data" + eks_role_arn = "arn:aws:iam::${local.environment_management.account_ids["electronic-monitoring-data-production"]}:role/prod-data-api-cross-account-role" + } + ) + ] +} diff --git a/terraform/environments/analytical-platform-compute/helm-charts-applications.tf b/terraform/environments/analytical-platform-compute/helm-charts-applications.tf deleted file mode 100644 index 967af2b0763..00000000000 --- a/terraform/environments/analytical-platform-compute/helm-charts-applications.tf +++ /dev/null @@ -1,17 +0,0 @@ -resource "helm_release" "ui" { - /* https://github.com/ministryofjustice/analytical-platform-ui */ - name = "ui" - repository = "oci://ghcr.io/ministryofjustice/analytical-platform-charts" - version = "0.2.6" - chart = "analytical-platform-ui" - namespace = kubernetes_namespace.ui.metadata[0].name - values = [ - templatefile( - "${path.module}/src/helm/values/ui/values.yml.tftpl", - { - ui_hostname = local.environment_configuration.ui_hostname - eks_role_arn = module.analytical_platform_ui_service_role.iam_role_arn - } - ) - ] -} diff --git a/terraform/environments/analytical-platform-compute/iam-policies.tf b/terraform/environments/analytical-platform-compute/iam-policies.tf 
index 84721171ddb..607480dde57 100644 --- a/terraform/environments/analytical-platform-compute/iam-policies.tf +++ b/terraform/environments/analytical-platform-compute/iam-policies.tf @@ -349,3 +349,108 @@ module "data_production_mojap_derived_bucket_lake_formation_policy" { tags = local.tags } + +data "aws_iam_policy_document" "copy_apdp_cadet_metadata_to_compute_policy" { + #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions + #checkov:skip=CKV_TF_2:Module registry does not support tags for versions + statement { + sid = "AthenaAccess" + effect = "Allow" + actions = [ + "athena:List*", + "athena:Get*", + "athena:StartQueryExecution", + "athena:StopQueryExecution" + ] + resources = [ + "arn:aws:athena:eu-west-2:${data.aws_caller_identity.current.account_id}:datacatalog/*", + "arn:aws:athena:eu-west-2:${data.aws_caller_identity.current.account_id}:workgroup/*" + ] + } + statement { + sid = "GlueAccess" + effect = "Allow" + actions = [ + "glue:Get*", + "glue:DeleteTable", + "glue:DeleteTableVersion", + "glue:DeleteSchema", + "glue:DeletePartition", + "glue:DeleteDatabase", + "glue:UpdateTable", + "glue:UpdateSchema", + "glue:UpdatePartition", + "glue:UpdateDatabase", + "glue:CreateTable", + "glue:CreateSchema", + "glue:CreatePartition", + "glue:CreatePartitionIndex", + "glue:BatchCreatePartition", + "glue:CreateDatabase" + ] + resources = [ + "arn:aws:glue:eu-west-2:${data.aws_caller_identity.current.account_id}:schema/*", + "arn:aws:glue:eu-west-2:${data.aws_caller_identity.current.account_id}:database/*", + "arn:aws:glue:eu-west-2:${data.aws_caller_identity.current.account_id}:table/*/*", + "arn:aws:glue:eu-west-2:${data.aws_caller_identity.current.account_id}:catalog" + ] + } + statement { + sid = "GlueFetchMetadataAccess" + effect = "Allow" + actions = [ + "glue:GetTable", + "glue:GetDatabase", + "glue:GetPartition" + ] + resources = ["arn:aws:glue:eu-west-2:${data.aws_caller_identity.current.account_id}:*"] + } + statement { + sid = "AthenaQueryBucketAccess" + effect = "Allow" + actions = [ + "s3:GetBucketLocation", + "s3:GetObject", + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "s3:ListMultipartUploadParts", + "s3:AbortMultipartUpload", + "s3:PutObject" + ] + resources = [ + module.mojap_compute_athena_query_results_bucket_eu_west_2.s3_bucket_arn, + "${module.mojap_compute_athena_query_results_bucket_eu_west_2.s3_bucket_arn}/*" + ] + } + statement { + sid = "AlterLFTags" + effect = "Allow" + actions = [ + "lakeformation:ListLFTags", + "lakeformation:GetLFTag", + "lakeformation:CreateLFTag", + "lakeformation:UpdateLFTag", + "lakeformation:AddLFTagsToResource", + "lakeformation:RemoveLFTagsFromResource", + "lakeformation:GetResourceLFTags", + "lakeformation:SearchTablesByLFTags", + "lakeformation:SearchDatabasesByLFTags", + ] + resources = ["*"] + } + +} + +module "copy_apdp_cadet_metadata_to_compute_policy" { + #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions + #checkov:skip=CKV_TF_2:Module registry does not support tags for versions + + source = "terraform-aws-modules/iam/aws//modules/iam-policy" + version = "5.48.0" + + name_prefix = "copy-apdp-cadet-metadata-to-compute-" + + policy = data.aws_iam_policy_document.copy_apdp_cadet_metadata_to_compute_policy.json + + tags = local.tags +} diff --git a/terraform/environments/analytical-platform-compute/iam-roles.tf b/terraform/environments/analytical-platform-compute/iam-roles.tf index 6bf418383fa..a27fc18735e 100644 --- 
a/terraform/environments/analytical-platform-compute/iam-roles.tf +++ b/terraform/environments/analytical-platform-compute/iam-roles.tf @@ -373,3 +373,23 @@ module "lake_formation_to_data_production_mojap_derived_tables_role" { tags = local.tags } + +module "copy_apdp_cadet_metadata_to_compute_assumable_role" { + #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions + #checkov:skip=CKV_TF_2:Module registry does not support tags for versions + source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role" + version = "5.48.0" + + allow_self_assume_role = false + trusted_role_arns = [ + "arn:aws:iam::${local.environment_management.account_ids["analytical-platform-data-production"]}:role/create-a-derived-table", + "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-reserved/sso.amazonaws.com/${data.aws_region.current.name}/${one(data.aws_iam_roles.data_engineering_sso_role.names)}", + "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-reserved/sso.amazonaws.com/${data.aws_region.current.name}/${one(data.aws_iam_roles.eks_sso_access_role.names)}", + ] + create_role = true + role_requires_mfa = false + role_name = "copy-apdp-cadet-metadata-to-compute" + + custom_role_policy_arns = [module.copy_apdp_cadet_metadata_to_compute_policy.arn] + # number_of_custom_role_policy_arns = 1 +} diff --git a/terraform/environments/analytical-platform-compute/kms-keys.tf b/terraform/environments/analytical-platform-compute/kms-keys.tf index a7b2d2bf3da..7ee087235b5 100644 --- a/terraform/environments/analytical-platform-compute/kms-keys.tf +++ b/terraform/environments/analytical-platform-compute/kms-keys.tf @@ -275,6 +275,22 @@ module "mlflow_s3_kms" { tags = local.tags } +module "mojap_compute_athena_s3_kms_eu_west_2" { + #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions + #checkov:skip=CKV_TF_2:Module registry does not support tags for versions + + source = "terraform-aws-modules/kms/aws" + version = "3.1.1" + + aliases = ["s3/mojap-compute-athena-query-results-eu-west-2"] + description = "Mojap Athena query bucket S3 KMS key for eu-west-2" + enable_default_policy = true + + deletion_window_in_days = 7 + + tags = local.tags +} + module "mojap_compute_logs_s3_kms_eu_west_2" { #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions #checkov:skip=CKV_TF_2:Module registry does not support tags for versions diff --git a/terraform/environments/analytical-platform-compute/lakeformation-data-lake-settings.tf b/terraform/environments/analytical-platform-compute/lakeformation-data-lake-settings.tf index 8d7f889e1a2..bd8fc44557a 100644 --- a/terraform/environments/analytical-platform-compute/lakeformation-data-lake-settings.tf +++ b/terraform/environments/analytical-platform-compute/lakeformation-data-lake-settings.tf @@ -5,7 +5,8 @@ resource "aws_lakeformation_data_lake_settings" "london" { module.analytical_platform_ui_service_role.iam_role_arn, module.analytical_platform_data_eng_dba_service_role.iam_role_arn, "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-reserved/sso.amazonaws.com/${data.aws_region.current.name}/${one(data.aws_iam_roles.data_engineering_sso_role.names)}", - "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-reserved/sso.amazonaws.com/${data.aws_region.current.name}/${one(data.aws_iam_roles.eks_sso_access_role.names)}" + 
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-reserved/sso.amazonaws.com/${data.aws_region.current.name}/${one(data.aws_iam_roles.eks_sso_access_role.names)}", + module.copy_apdp_cadet_metadata_to_compute_assumable_role.iam_role_arn ] } diff --git a/terraform/environments/analytical-platform-compute/lakeformation-permissions.tf b/terraform/environments/analytical-platform-compute/lakeformation-permissions.tf new file mode 100644 index 00000000000..a54f5936a3d --- /dev/null +++ b/terraform/environments/analytical-platform-compute/lakeformation-permissions.tf @@ -0,0 +1,58 @@ + +resource "aws_lakeformation_lf_tag" "source" { + count = terraform.workspace == "analytical-platform-compute-production" ? 1 : 0 + key = "source" + values = ["create-a-derived-table"] +} + +resource "aws_lakeformation_permissions" "cadet_all_data" { + for_each = (terraform.workspace == "analytical-platform-compute-production" ? + toset(["TABLE", "DATABASE"]) : toset([])) + + principal = module.copy_apdp_cadet_metadata_to_compute_assumable_role.iam_role_arn + permissions = ["ALL"] # https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html + + lf_tag_policy { + resource_type = each.value + expression { + key = "source" + values = ["create-a-derived-table"] + } + } +} + +resource "aws_lakeformation_lf_tag" "domain" { + for_each = try(local.environment_configuration.cadet_lf_tags, {}) + key = each.key + values = each.value +} + +resource "aws_lakeformation_permissions" "cadet_domain_database_data" { + for_each = try(local.environment_configuration.cadet_lf_tags, {}) + + principal = module.copy_apdp_cadet_metadata_to_compute_assumable_role.iam_role_arn + permissions = ["ALL"] # https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html + + lf_tag_policy { + resource_type = "DATABASE" + expression { + key = each.key + values = each.value + } + } +} + +resource "aws_lakeformation_permissions" "cadet_domain_table_data" { + for_each = try(local.environment_configuration.cadet_lf_tags, {}) + + principal = module.copy_apdp_cadet_metadata_to_compute_assumable_role.iam_role_arn + permissions = ["ALL"] # https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html + + lf_tag_policy { + resource_type = "TABLE" + expression { + key = each.key + values = each.value + } + } +} diff --git a/terraform/environments/analytical-platform-compute/lakeformation-registrations.tf b/terraform/environments/analytical-platform-compute/lakeformation-registrations.tf index e69de29bb2d..f6d3ed5d4a7 100644 --- a/terraform/environments/analytical-platform-compute/lakeformation-registrations.tf +++ b/terraform/environments/analytical-platform-compute/lakeformation-registrations.tf @@ -0,0 +1,4 @@ +resource "aws_lakeformation_resource" "example" { + arn = "arn:aws:s3:::mojap-derived-tables" + role_arn = module.lake_formation_to_data_production_mojap_derived_tables_role.iam_role_arn +} diff --git a/terraform/environments/analytical-platform-compute/observability-platform.tf b/terraform/environments/analytical-platform-compute/observability-platform.tf index 62f94c01f67..060851781dd 100644 --- a/terraform/environments/analytical-platform-compute/observability-platform.tf +++ b/terraform/environments/analytical-platform-compute/observability-platform.tf @@ -5,7 +5,7 @@ module "observability_platform_tenant" { source = "ministryofjustice/observability-platform-tenant/aws" version = "1.2.0" - observability_platform_account_id = 
local.environment_management.account_ids["observability-platform-${local.environment_configuration.observability_platform}"] + observability_platform_account_id = local.environment_management.account_ids["observability-platform-production"] enable_prometheus = true enable_xray = true additional_policies = { diff --git a/terraform/environments/analytical-platform-compute/s3-buckets.tf b/terraform/environments/analytical-platform-compute/s3-buckets.tf index e0bafa32fc1..03712ea8813 100644 --- a/terraform/environments/analytical-platform-compute/s3-buckets.tf +++ b/terraform/environments/analytical-platform-compute/s3-buckets.tf @@ -136,28 +136,61 @@ module "mojap_compute_logs_bucket_eu_west_1" { ) } -moved { - from = module.mojap_compute_logs_bucket.aws_s3_bucket.this[0] - to = module.mojap_compute_logs_bucket_eu_west_2.aws_s3_bucket.this[0] -} -moved { - from = module.mojap_compute_logs_bucket.aws_s3_bucket_policy.this[0] - to = module.mojap_compute_logs_bucket_eu_west_2.aws_s3_bucket_policy.this[0] -} -moved { - from = module.mojap_compute_logs_bucket.aws_s3_bucket_public_access_block.this[0] - to = module.mojap_compute_logs_bucket_eu_west_2.aws_s3_bucket_public_access_block.this[0] -} -moved { - from = module.mojap_compute_logs_bucket.aws_s3_bucket_server_side_encryption_configuration.this[0] - to = module.mojap_compute_logs_bucket_eu_west_2.aws_s3_bucket_server_side_encryption_configuration.this[0] -} -moved { - from = module.mojap_compute_logs_bucket.aws_s3_bucket_versioning.this[0] - to = module.mojap_compute_logs_bucket_eu_west_2.aws_s3_bucket_versioning.this[0] + +data "aws_iam_policy_document" "athena_query_results_policy_eu_west_2" { + #checkov:skip=CKV_AWS_356:resource "*" limited by condition + statement { + sid = "DenyInsecureTransport" + effect = "Deny" + actions = ["s3:*"] + resources = [ + "arn:aws:s3:::mojap-compute-${local.environment}-athena-query-results-eu-west-2/*", + "arn:aws:s3:::mojap-compute-${local.environment}-athena-query-results-eu-west-2" + ] + principals { + type = "*" + identifiers = ["*"] + } + condition { + test = "Bool" + variable = "aws:SecureTransport" + values = ["false"] + } + } } -moved { - from = aws_iam_policy_document.s3_server_access_logs_policy - to = aws_iam_policy_document.s3_server_access_logs_eu_west_2_policy +module "mojap_compute_athena_query_results_bucket_eu_west_2" { + #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions + #checkov:skip=CKV_TF_2:Module registry does not support tags for versions + + source = "terraform-aws-modules/s3-bucket/aws" + version = "4.2.2" + + bucket = "mojap-compute-${local.environment}-athena-query-results-eu-west-2" + + force_destroy = true + + attach_policy = true + policy = data.aws_iam_policy_document.athena_query_results_policy_eu_west_2.json + + object_lock_enabled = false + + versioning = { + status = "Disabled" + } + + server_side_encryption_configuration = { + rule = { + bucket_key_enabled = true + apply_server_side_encryption_by_default = { + kms_master_key_id = module.mojap_compute_athena_s3_kms_eu_west_2.key_arn + sse_algorithm = "aws:kms" + } + } + } + + tags = merge( + local.tags, + { "backup" = "false" } + ) } diff --git a/terraform/environments/analytical-platform-compute/security-groups.tf b/terraform/environments/analytical-platform-compute/security-groups.tf index 4812bcc7da0..eb52972deeb 100644 --- a/terraform/environments/analytical-platform-compute/security-groups.tf +++ b/terraform/environments/analytical-platform-compute/security-groups.tf @@ -55,3 +55,19 
@@ module "quicksight_shared_vpc_security_group" { tags = local.tags } + +/* This security group is temporary and will be retired when we're satisfied with DataSync end-to-end */ +module "debug_instance_security_group" { + #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions + + source = "terraform-aws-modules/security-group/aws" + version = "5.2.0" + + name = "debug-instance" + vpc_id = module.vpc.vpc_id + + egress_cidr_blocks = ["0.0.0.0/0"] + egress_rules = ["all-all"] + + tags = local.tags +} diff --git a/terraform/environments/analytical-platform-compute/src/helm/values/amazon-prometheus-proxy/values.yml.tftpl b/terraform/environments/analytical-platform-compute/src/helm/values/amazon-prometheus-proxy/values.yml.tftpl index 469f7871cf6..0a6f30fe7ad 100644 --- a/terraform/environments/analytical-platform-compute/src/helm/values/amazon-prometheus-proxy/values.yml.tftpl +++ b/terraform/environments/analytical-platform-compute/src/helm/values/amazon-prometheus-proxy/values.yml.tftpl @@ -14,6 +14,10 @@ alertmanager: grafana: enabled: false +kube-state-metrics: + extraArgs: + - --metric-labels-allowlist=pods=[*] + prometheus: agentMode: true serviceAccount: diff --git a/terraform/environments/analytical-platform-compute/vpc.tf b/terraform/environments/analytical-platform-compute/vpc.tf index f134388e418..0bd5c7c5daf 100644 --- a/terraform/environments/analytical-platform-compute/vpc.tf +++ b/terraform/environments/analytical-platform-compute/vpc.tf @@ -30,6 +30,7 @@ module "vpc" { flow_log_cloudwatch_log_group_retention_in_days = local.vpc_flow_log_cloudwatch_log_group_retention_in_days flow_log_max_aggregation_interval = local.vpc_flow_log_max_aggregation_interval vpc_flow_log_tags = { Name = local.our_vpc_name } + flow_log_log_format = "$${version} $${account-id} $${interface-id} $${srcaddr} $${dstaddr} $${srcport} $${dstport} $${protocol} $${packets} $${bytes} $${start} $${end} $${action} $${log-status} $${vpc-id} $${subnet-id} $${instance-id} $${tcp-flags} $${type} $${pkt-srcaddr} $${pkt-dstaddr} $${region} $${az-id} $${sublocation-type} $${sublocation-id} $${pkt-src-aws-service} $${pkt-dst-aws-service} $${flow-direction} $${traffic-path}" public_subnet_tags = { "kubernetes.io/role/elb" = 1 diff --git a/terraform/environments/analytical-platform-ingestion/environment-configuration.tf b/terraform/environments/analytical-platform-ingestion/environment-configuration.tf index 71ad724ce2a..516cc0f9fa7 100644 --- a/terraform/environments/analytical-platform-ingestion/environment-configuration.tf +++ b/terraform/environments/analytical-platform-ingestion/environment-configuration.tf @@ -19,9 +19,6 @@ locals { "10.0.0.0/8" ] - /* Observability Platform */ - observability_platform = "development" - /* Image Versions */ scan_image_version = "0.1.3" transfer_image_version = "0.0.18" @@ -67,9 +64,6 @@ locals { "10.0.0.0/8" ] - /* Observability Platform */ - observability_platform = "production" - /* Image Versions */ scan_image_version = "0.1.3" transfer_image_version = "0.0.18" @@ -92,20 +86,6 @@ locals { egress_bucket = module.bold_egress_bucket.s3_bucket_id egress_bucket_kms_key = module.s3_bold_egress_kms.key_arn } - "darren-brooke" = { - ssh_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDAxeaj85/JshqYMQ1B97TtHyy81oF3L33s89NWCIiHSM/Hql6aFfxCCivsN4Y1OZic8S5drgxe7MdETaWeEKfaWIMgqESGOw5yhCuNSEvt896cc0hSU8/ZwUZrTzYfiCAwqBQHI13JBAP7VcWBR6v6CYQL8JB7lSEvq7vY2BJJ4N9HchlXBHvxHHOu7Y6+ta7BrODvCc0zLHWANE65U4DmZpXmwHHsBao4cOUIlrBIDIAGtXAJB/L+cByH2OPMsRPhUe2UMfTgRHCJdekics/7DzrR+hhZRnHM9du52TFT89eAKpQGpp0wEkFoYKntXesGFr1R/uhRtqzanzBggXIv db@ubuntu" - cidr_blocks = ["54.37.241.156/30"] - egress_bucket = module.ext_2024_egress_bucket.s3_bucket_id - egress_bucket_kms_key = module.s3_ext_2024_egress_kms.key_arn - - } - "aaron-willetts" = { - ssh_key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAtHz+QozotArRIjRcmD4GDdiQLtXPTX+GGAXqpeqpBZ aaron@kali" - cidr_blocks = ["167.71.136.237/32"] - egress_bucket = module.ext_2024_egress_bucket.s3_bucket_id - egress_bucket_kms_key = module.s3_ext_2024_egress_kms.key_arn - - } } /* DataSync */ diff --git a/terraform/environments/analytical-platform-ingestion/ext-user-2024.tf b/terraform/environments/analytical-platform-ingestion/ext-user-2024.tf deleted file mode 100644 index dd723bd1057..00000000000 --- a/terraform/environments/analytical-platform-ingestion/ext-user-2024.tf +++ /dev/null @@ -1,102 +0,0 @@ -#tfsec:ignore:avd-aws-0088 - The bucket policy is attached to the bucket -#tfsec:ignore:avd-aws-0132 - The bucket policy is attached to the bucket -module "ext_2024_egress_bucket" { - #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions - - source = "terraform-aws-modules/s3-bucket/aws" - version = "4.1.2" - - bucket = "mojap-ingestion-${local.environment}-ext-2024-egress" - - force_destroy = true - - versioning = { - enabled = true - } - - server_side_encryption_configuration = { - rule = { - apply_server_side_encryption_by_default = { - kms_master_key_id = module.s3_ext_2024_egress_kms.key_arn - sse_algorithm = "aws:kms" - } - } - } -} - -module "s3_ext_2024_egress_kms" { - #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions - - source = "terraform-aws-modules/kms/aws" - version = "3.1.0" - - aliases = ["s3/ext-2024-egress"] - description = "Used in the External 2024 Egress Solution" - enable_default_policy = true - key_statements = [ - { - sid = "AllowReadOnlyRole" - actions = [ - "kms:Encrypt", - "kms:GenerateDataKey" - ] - resources = ["*"] - effect = "Allow" - principals = [ - { - type = "AWS" - identifiers = ["arn:aws:iam::${local.environment_management.account_ids[terraform.workspace]}:role/security-read-only"] - } - ] - } - ] - deletion_window_in_days = 7 -} - -data "aws_iam_policy_document" "ext_2024_target_bucket_policy" { - statement { - sid = "LandingPermissions" - effect = "Allow" - principals { - type = "AWS" - identifiers = ["arn:aws:iam::471112983409:role/transfer"] - } - actions = [ - "s3:GetObject", - "s3:PutObject", - "s3:DeleteObject", - "s3:PutObjectTagging" - ] - resources = [ - "arn:aws:s3:::mojap-ingestion-${local.environment}-ext-2024-target/*", - "arn:aws:s3:::mojap-ingestion-${local.environment}-ext-2024-target" - ] - } -} - -#tfsec:ignore:avd-aws-0088 - The bucket policy is attached to the bucket -#tfsec:ignore:avd-aws-0132 - The bucket policy is attached to the bucket -module "ext_2024_target_bucket" { - #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions - - source = "terraform-aws-modules/s3-bucket/aws" - version = "4.1.2" - - bucket = "mojap-ingestion-${local.environment}-ext-2024-target" - - force_destroy = true - - versioning = { - enabled = true - } - attach_policy = true - policy = 
data.aws_iam_policy_document.ext_2024_target_bucket_policy.json - - server_side_encryption_configuration = { - rule = { - apply_server_side_encryption_by_default = { - sse_algorithm = "AES256" - } - } - } -} diff --git a/terraform/environments/analytical-platform-ingestion/observability-platform.tf b/terraform/environments/analytical-platform-ingestion/observability-platform.tf index c8370b61b5e..7e0f310f633 100644 --- a/terraform/environments/analytical-platform-ingestion/observability-platform.tf +++ b/terraform/environments/analytical-platform-ingestion/observability-platform.tf @@ -4,7 +4,7 @@ module "observability_platform_tenant" { source = "ministryofjustice/observability-platform-tenant/aws" version = "1.2.0" - observability_platform_account_id = local.environment_management.account_ids["observability-platform-${local.environment_configuration.observability_platform}"] + observability_platform_account_id = local.environment_management.account_ids["observability-platform-production"] enable_xray = true tags = local.tags diff --git a/terraform/environments/analytical-platform-ingestion/route53-resolver-associations.tf b/terraform/environments/analytical-platform-ingestion/route53-resolver-associations.tf index ce88eee701d..8df93f956a5 100644 --- a/terraform/environments/analytical-platform-ingestion/route53-resolver-associations.tf +++ b/terraform/environments/analytical-platform-ingestion/route53-resolver-associations.tf @@ -1,4 +1,5 @@ module "connected_vpc_route53_resolver_associations" { + #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions source = "terraform-aws-modules/route53/aws//modules/resolver-rule-associations" version = "4.1.0" diff --git a/terraform/environments/analytical-platform-ingestion/route53-resolver-endpoints.tf b/terraform/environments/analytical-platform-ingestion/route53-resolver-endpoints.tf index 1801544330c..ea66ccb8afe 100644 --- a/terraform/environments/analytical-platform-ingestion/route53-resolver-endpoints.tf +++ b/terraform/environments/analytical-platform-ingestion/route53-resolver-endpoints.tf @@ -1,4 +1,6 @@ module "connected_vpc_outbound_route53_resolver_endpoint" { + #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions + source = "terraform-aws-modules/route53/aws//modules/resolver-endpoints" version = "4.1.0" @@ -24,4 +26,4 @@ module "connected_vpc_outbound_route53_resolver_endpoint" { ] tags = local.tags -} \ No newline at end of file +} diff --git a/terraform/environments/apex/application_variables.json b/terraform/environments/apex/application_variables.json index 88dc67f6171..8696d1cc328 100644 --- a/terraform/environments/apex/application_variables.json +++ b/terraform/environments/apex/application_variables.json @@ -153,17 +153,17 @@ "lz_account_id": "484221692666" }, "production": { - "ec2amiid": "ami-0fd11105aa6dcd77d", + "ec2amiid": "ami-0484bb2dca8e69b20", "ec2instancetype": "t3.xlarge", "workspace_cidr": "10.200.16.0/20", "u01_orahome_size": "20", - "u01_orahome_snapshot": "snap-0ff8db461fc1cc4af", + "u01_orahome_snapshot": "snap-0f65cdb141493bd72", "u02_oradata_size": "100", - "u02_oradata_snapshot": "snap-00aee055837c0f329", + "u02_oradata_snapshot": "snap-0497aeb9f2f4029e3", "u03_redo_size": "50", - "u03_redo_snapshot": "snap-07292192ef32185e2", + "u03_redo_snapshot": "snap-0f0890c80d8c4efe9", "u04_arch_size": "50", - "u04_arch_snapshot": "snap-0d419d52da8066a22", + "u04_arch_snapshot": "snap-07f384ea32af67b25", "container_instance_type": "linux", "instance_type": 
"m5.large", "key_name": "", diff --git a/terraform/environments/apex/ec2.tf b/terraform/environments/apex/ec2.tf index 0aeb376f0bd..2da15c52e03 100644 --- a/terraform/environments/apex/ec2.tf +++ b/terraform/environments/apex/ec2.tf @@ -17,6 +17,9 @@ resource "aws_instance" "apex_db_instance" { user_data_base64 = base64encode(local.database-instance-userdata) user_data_replace_on_change = true + lifecycle { + ignore_changes = [user_data_base64] + } root_block_device { delete_on_termination = false @@ -65,14 +68,14 @@ resource "aws_vpc_security_group_ingress_rule" "db_ecs" { to_port = 1521 } -# resource "aws_vpc_security_group_ingress_rule" "db_mp_vpc" { -# security_group_id = aws_security_group.database.id -# description = "Allow MP VPC (OAS) to access database instance" -# cidr_ipv4 = data.aws_vpc.shared.cidr_block -# from_port = 1521 -# ip_protocol = "tcp" -# to_port = 1521 -# } +resource "aws_vpc_security_group_ingress_rule" "db_mp_vpc" { + security_group_id = aws_security_group.database.id + description = "Allow MP VPC (OAS) to access database instance" + cidr_ipv4 = data.aws_vpc.shared.cidr_block + from_port = 1521 + ip_protocol = "tcp" + to_port = 1521 +} resource "aws_vpc_security_group_ingress_rule" "db_lambda" { security_group_id = aws_security_group.database.id @@ -83,24 +86,24 @@ resource "aws_vpc_security_group_ingress_rule" "db_lambda" { to_port = 22 } -# resource "aws_vpc_security_group_ingress_rule" "db_workspace" { -# security_group_id = aws_security_group.database.id -# description = "Database listener port access to Workspaces" -# cidr_ipv4 = local.application_data.accounts[local.environment].workspace_cidr -# from_port = 1521 -# ip_protocol = "tcp" -# to_port = 1521 -# } +resource "aws_vpc_security_group_ingress_rule" "db_workspace" { + security_group_id = aws_security_group.database.id + description = "Database listener port access to Workspaces" + cidr_ipv4 = local.application_data.accounts[local.environment].workspace_cidr + from_port = 1521 + ip_protocol = "tcp" + to_port = 1521 +} # This is a temp rule whilst OAS resides in LZ -# resource "aws_vpc_security_group_ingress_rule" "oas_lz" { -# security_group_id = aws_security_group.database.id -# description = "Allow OAS in LZ to access APEX" -# cidr_ipv4 = local.application_data.accounts[local.environment].oas_lz_cidr -# from_port = 1521 -# ip_protocol = "tcp" -# to_port = 1521 -# } +resource "aws_vpc_security_group_ingress_rule" "oas_lz" { + security_group_id = aws_security_group.database.id + description = "Allow OAS in LZ to access APEX" + cidr_ipv4 = local.application_data.accounts[local.environment].oas_lz_cidr + from_port = 1521 + ip_protocol = "tcp" + to_port = 1521 +} resource "aws_vpc_security_group_egress_rule" "db_outbound" { security_group_id = aws_security_group.database.id diff --git a/terraform/environments/apex/lambda.tf b/terraform/environments/apex/lambda.tf index b88e4cbaf3e..5b02d01a6cf 100644 --- a/terraform/environments/apex/lambda.tf +++ b/terraform/environments/apex/lambda.tf @@ -2,6 +2,7 @@ locals { create_db_snapshots_script_prefix = "dbsnapshot" delete_db_snapshots_script_prefix = "deletesnapshots" db_connect_script_prefix = "dbconnect" + hash_value = "Y/4+i1hcHvLBzOaCHJ/m9bQLuVtQwr8gnF//AJ2j+S4=" } resource "aws_ssm_parameter" "ssh_key" { @@ -98,6 +99,11 @@ resource "aws_iam_role_policy_attachment" "backup_lambda" { ### S3 for Backup Lambda ################################## +data "aws_s3_object" "nodejs_zip" { + bucket = aws_s3_bucket.backup_lambda.id + key = "nodejs.zip" +} + resource 
"aws_s3_bucket" "backup_lambda" { bucket = "${local.application_name}-${local.environment}-backup-lambda" tags = merge( @@ -108,11 +114,11 @@ resource "aws_s3_bucket" "backup_lambda" { resource "aws_s3_object" "provision_files" { bucket = aws_s3_bucket.backup_lambda.id - for_each = fileset("./zipfiles/", "**") + for_each = toset(["${local.create_db_snapshots_script_prefix}.zip", "${local.delete_db_snapshots_script_prefix}.zip", "${local.db_connect_script_prefix}.zip"]) key = each.value - source = "./zipfiles/${each.value}" + source = "./scripts/${each.value}" content_type = "application/zip" - source_hash = filemd5("./zipfiles/${each.value}") + source_hash = filemd5("./scripts/${each.value}") } # This delays the creation of resource @@ -161,19 +167,19 @@ resource "aws_s3_bucket_versioning" "backup_lambda" { data "archive_file" "create_db_snapshots" { type = "zip" source_file = "scripts/${local.create_db_snapshots_script_prefix}.js" - output_path = "zipfiles/${local.create_db_snapshots_script_prefix}.zip" + output_path = "scripts/${local.create_db_snapshots_script_prefix}.zip" } data "archive_file" "delete_db_snapshots" { type = "zip" source_file = "scripts/${local.delete_db_snapshots_script_prefix}.py" - output_path = "zipfiles/${local.delete_db_snapshots_script_prefix}.zip" + output_path = "scripts/${local.delete_db_snapshots_script_prefix}.zip" } data "archive_file" "connect_db" { type = "zip" source_file = "scripts/${local.db_connect_script_prefix}.js" - output_path = "zipfiles/${local.db_connect_script_prefix}.zip" + output_path = "scripts/${local.db_connect_script_prefix}.zip" } @@ -206,8 +212,9 @@ resource "aws_lambda_layer_version" "backup_lambda" { license_info = "Apache-2.0" s3_bucket = aws_s3_bucket.backup_lambda.id s3_key = "nodejs.zip" - source_code_hash = filebase64sha256("zipfiles/nodejs.zip") - + source_code_hash = local.hash_value +# Since the nodejs.zip file has been added manually to the s3 bucket the source_code_hash would have to be computed and added manually as well anytime there's a change to nodejs.zip +# This command allows you to retrieve the hash - openssl dgst -sha256 -binary nodejs.zip | base64 compatible_runtimes = ["nodejs18.x"] depends_on = [time_sleep.wait_for_provision_files] # This resource creation will be delayed to ensure object exists in the bucket } diff --git a/terraform/environments/apex/zipfiles/nodejs.zip b/terraform/environments/apex/zipfiles/nodejs.zip deleted file mode 100644 index 0382b4317bc..00000000000 Binary files a/terraform/environments/apex/zipfiles/nodejs.zip and /dev/null differ diff --git a/terraform/environments/ccms-ebs/application_variables.json b/terraform/environments/ccms-ebs/application_variables.json index 23c7e9e63d1..cb27282cf2f 100644 --- a/terraform/environments/ccms-ebs/application_variables.json +++ b/terraform/environments/ccms-ebs/application_variables.json @@ -370,7 +370,7 @@ "ebs_iops_ebsdb_dbf03": 12000, "ebs_size_ebsdb_dbf03": 3000, "ebs_iops_ebsdb_dbf04": 28000, - "ebs_size_ebsdb_dbf04": 500, + "ebs_size_ebsdb_dbf04": 1000, "ebs_size_ebsdb_redoA": 100, "ebs_size_ebsdb_redoB": 50, "ebs_size_ebsdb_techst": 50, diff --git a/terraform/environments/contract-work-administration/backup_lambda.tf b/terraform/environments/contract-work-administration/backup_lambda.tf deleted file mode 100644 index 30c2eb38708..00000000000 --- a/terraform/environments/contract-work-administration/backup_lambda.tf +++ /dev/null @@ -1,300 +0,0 @@ -locals { - create_db_snapshots_script_prefix = "dbsnapshot" - 
delete_db_snapshots_script_prefix = "deletesnapshots" - db_connect_script_prefix = "dbconnect" -} - -resource "aws_ssm_parameter" "ssh_key" { - name = "EC2_SSH_KEY" # This needs to match the name supplied to the dbconnect.js script - description = "SSH Key used by Lambda function to access database instance for backup. Value is updated manually." - type = "SecureString" - value = "Placeholder" - - tags = merge( - local.tags, - { Name = "EC2_SSH_KEY" } - ) - lifecycle { - ignore_changes = [ - value, - ] - } -} - -################################## -### IAM Role for BackUp Lambda -################################## - -data "aws_iam_policy_document" "backup_lambda" { - statement { - effect = "Allow" - - principals { - type = "Service" - identifiers = ["lambda.amazonaws.com", "ssm.amazonaws.com"] - } - - actions = ["sts:AssumeRole"] - } -} - -resource "aws_iam_role" "backup_lambda" { - name = "${local.application_name_short}-backup-lambda-role" - assume_role_policy = data.aws_iam_policy_document.backup_lambda.json - tags = merge( - local.tags, - { Name = "${local.application_name_short}-backup-lambda-role" } - ) -} - -resource "aws_iam_policy" "backup_lambda" { #tfsec:ignore:aws-iam-no-policy-wildcards - name = "${local.application_name_short}-${local.environment}-backup-lambda-policy" - tags = merge( - local.tags, - { Name = "${local.application_name_short}-${local.environment}-backup-lambda-policy" } - ) - policy = <> " + address); - const conn = new Client(); - console.log(`[+] Running "begin backup commands" as Oracle`); - conn.on('ready', () => { - console.log('Client :: ready'); - conn.exec('sudo su - oracle -c "sqlplus / as sysdba < { - if (err) { - reject(err); - } - stream.on('close', (code, signal) => { - conn.end(); - console.log('Stream :: close :: code: ' + code + ', signal: ' + signal); - setTimeout(() => { resolve(); }, 2000); // Ugly solution to wait until the ssh socket closes before resolving... - }).on('data', (data) => { - console.log('STDOUT: ' + data); - if (data.toString().toUpperCase().includes("ERROR")) exec_error = true; - }).stderr.on('data', (data) => { - console.log('STDERR: ' + data); - if (data.toString().toUpperCase().includes("ERROR")) exec_error = true; - }) - ; - }); - }).connect({ - host: address, - port: 22, - username: username, - privateKey: myKey, - // debug: console.log, // Uncomment to get more detailed logs - algorithms: { - kex: ["diffie-hellman-group1-sha1"] - } - }); - } else if (action == "end"){ - console.log("[+] Trying connecting to EC2 ==>> " + address); - console.log(`[+] Running "begin backup commands" as Oracle`); - - const conn = new Client(); - conn.on('ready', () => { - console.log('Client :: ready'); - conn.exec('sudo su - oracle -c "sqlplus / as sysdba < { - if (err) { - reject(err); - } - stream.on('close', (code, signal) => { - conn.end(); - console.log('Stream :: close :: code: ' + code + ', signal: ' + signal); - setTimeout(() => { resolve(); }, 2000); // Ugly solution to wait until the ssh socket closes before resolving... 
- }).on('data', (data) => { - console.log('STDOUT: ' + data); - if (data.toString().toUpperCase().includes("ERROR")) exec_error = true; - }).stderr.on('data', (data) => { - console.log('STDERR: ' + data); - if (data.toString().toUpperCase().includes("ERROR")) exec_error = true; - }) - ; - }); - }).connect({ - host: address, - port: 22, - username: username, - privateKey: myKey, - // debug: console.log, // Uncomment to get more detailed logs - algorithms: { - kex: ["diffie-hellman-group1-sha1"] - } - }); - } - }); - try { - await prom; - console.log('EXEC_ERROR: ' + exec_error); - if (exec_error) { - throw new Error('Please see logs above for more detail.') - } - console.log(`[+] Completed DB alter state: ${action} ==>> ` + address); - } catch (e) { - throw new Error(`SSH Exec did not run successfully on the instance ${address}: ` + e ); - } - } -} - - -exports.handler = async (event, context) => { - try { - console.log("[+} Received event:", JSON.stringify(event, null, 2)); - await connSSH(event.action, event.appname); - - context.done(); - } catch (error) { - throw new Error(error); - } -}; diff --git a/terraform/environments/contract-work-administration/scripts/dbsnapshot.js b/terraform/environments/contract-work-administration/scripts/dbsnapshot.js deleted file mode 100644 index d276e578688..00000000000 --- a/terraform/environments/contract-work-administration/scripts/dbsnapshot.js +++ /dev/null @@ -1,315 +0,0 @@ -///////////////////////////////////////////////////////////////////// -// Automated backup script -// - Calls dbconnect lambda to put DB in backup mode -// - Triggers volume snapshots for all volumes connected to instance -// -// version: 1.0 (for migration to MP) -///////////////////////////////////////////////////////////////////// - -const AWS = require("aws-sdk"); - -//Set date format -var date_ob = new Date(); -var day = ("0" + date_ob.getDate()).slice(-2); -var month = ("0" + (date_ob.getMonth() + 1)).slice(-2); -var year = date_ob.getFullYear(); - -var date = day + "/" + month + "/" + year; - -//lambda object -let lambda = new AWS.Lambda({ apiVersion: "2015-03-31" }); - -//EC2 object -let ec2 = new AWS.EC2({ apiVersion: "2014-10-31" }); - -async function invokeLambdaStart(appname) { - // try { - console.log("[+] Putting DB into backup mode"); - - const lambdaInvokeStart = await lambda - .invoke({ - FunctionName: "connectDBFunction", - InvocationType: "RequestResponse", // This means invoking the function synchronously. Note that if Lambda was able to run the function, the status code is 200, even if the function returned an error. - Payload: JSON.stringify({ action: "begin", appname: appname }), - }) - .promise(); - - //Check lambda returns success - if (lambdaInvokeStart["FunctionError"] == null) - { - // Run the volume snapshots - console.log("[+] Creating volume snapshot"); - await handleSnapshot(appname); - } else { - console.log("Return output: ", lambdaInvokeStart); - throw new Error("The connectDBFunction (begin) Lambda function has an error. 
Please see that function's logs for more information."); - } - - // } catch (e) { - // throw new Error("[-] " + e); - // } -} - -async function invokeLambdaStop(appname) { - // try { - console.log("[+] Putting DB into normal operations mode"); - - // setTimeout(() => { - // console.log("[+] Waiting for DB....."); - // }, 7000); - - const lambdaInvokeStop = await lambda - .invoke({ - FunctionName: "connectDBFunction", - InvocationType: "RequestResponse", - Payload: JSON.stringify({ action: "end", appname: appname }), - }) - .promise(); - - //Check lambda returns success - if (lambdaInvokeStop["FunctionError"] == null) - { - // Run the volume snapshots - console.log("[+] Datatbase is back in normal operations mode"); - } else { - console.log("Return output: ", lambdaInvokeStop); - throw new Error("The connectDBFunction (end) Lambda function has an error. Please see that function's logs for more information."); - } - - // } catch (e) { - // console.log("[-] " + e); - // throw new Error("The connectDBFunction Lambda (end) function has an error. Please see that function's logs for more information."); - // } -} - -async function invokeLambdaFinal(appname) { - try { - console.log("Waiting for DB to be ready"); - await new Promise(resolve => setTimeout(resolve, 30000)); - console.log("[+] Taking final snapshots out of backup mode"); - await handleSnapshot2(appname); - } catch (e) { - console.log("[-]" + e); - throw new Error("There is an error taking final shapshots."); - } -} - - -// Grab volume id all volumes attached to the instance and snapshot - -async function handleSnapshot(appname) { - try { - // Get all instances of our app - const instances = await getInstanceId(appname); - - // Get all volumes on all instances of our app - var volumes_list = []; - var snapshot_list = []; - for (const instance of instances) { - const volumes = await listVolumes(instance); - volumes_list.push(volumes); - } - - // Loop over instance, if more than 1 instance returned - for (const instance_list of volumes_list) { - for (const volume of instance_list["Volumes"]) { - console.log("Taking snapshot of Volume: ", volume); - var volume_id = volume["VolumeId"]; - var volume_device = volume["Attachments"][0]["Device"]; - var volume_name = ''; - for(var tag of volume['Tags']){ - if(tag['Key'].includes('Name')){ - volume_name = tag['Value']; - } - } - // Trigger EBS snapshots - let snap = await ec2CreateSnapshot(volume_id, appname, volume_device, volume_name, date); - snapshot_list.push(snap.SnapshotId); - } - } - } catch (error) { - console.log(error); - } -} - -//Get instanceId for EC2 instances tagged with Name:{ appname } -// May return more than 1 instance if there are multiple instances with the same name -async function getInstance(appname) { - console.log("Getting all instances tagged with Name:", appname); - return ec2 - .describeInstances({ Filters: [{ Name: "tag:Name", Values: [appname] }] }) - .promise(); -} - -// Capture all app instance IPs in a list -async function getInstanceId(appname) { - var instance_id_list = []; - var instance_data = await getInstance(appname); - for (const res of instance_data["Reservations"]) { - for (const instance of res["Instances"]) { - instance_id_list.push(instance["InstanceId"]); - } - } - console.log("Found ", instance_id_list.length, " instances"); - return instance_id_list; -} - -// List all volumes for EC2 instance - -async function listVolumes(instance_id) { - console.log("getting volumes for ", instance_id); - return ec2 - .describeVolumes({ - Filters: [{ Name: 
"attachment.instance-id", Values: [instance_id] }], - }) - .promise(); -} - -// Create EC2 snapshot based on volume id - -async function ec2CreateSnapshot(volume, appname, volume_device, volume_name, date) { - console.log("Creating snapshot of volume:", volume, volume_device, volume_name, date); - let params = { - VolumeId: volume, - Description: - appname + " automatically created snapshot and resource volume id: " + volume, - TagSpecifications: [ - { - ResourceType: "snapshot", - Tags: [ - { - Key: "Name", - Value: appname + "-" + volume_name + "-" + volume_device + "-" + date - }, - { - Key: "Application", - Value: appname - }, - { - Key: "Date", - Value: date - }, - { - Key: "dlm:snapshot-with:volume-hourly-35-day-retention", - Value: "yes" - }, - { - Key: "Created_by", - Value: "Automated snapshot created by DBSnapshotFunction Lambda" - } - ], - }, - ], - }; - return ec2.createSnapshot(params).promise(); -} - -async function handleSnapshot2(appname) { - try { - // Get all instances of our app - const instances = await getInstanceId(appname); - - // Get all volumes on all instances of our app - var volumes_list = []; - for (const instance of instances) { - const volumes = await listVolumes(instance); - volumes_list.push(volumes); - } - - // Loop over instance, if more than 1 instance returned - for (const instance_list of volumes_list) { - for (const volume of instance_list["Volumes"]) { - var volume_id = volume["VolumeId"]; - var volume_device = volume["Attachments"][0]["Device"]; - var volume_name=''; - for(var tag of volume['Tags']){ - if(tag['Key'].includes('Name')){ - volume_name = tag['Value']; - } - } - // if the drive is oraarch/oraredo trigger an EBS snapsot - for(const tag of volume['Tags']){ - if (tag['Value'].includes('arch')){ - console.log(volume_id, "is oraarch volume"); - let snap = await ec2CreateSnapshot2(volume_id, appname, volume_device, volume_name, date); - console.log("[+] Taking snapshot " + snap.SnapshotId); - break; - }} - for(const tag of volume['Tags']){ - if (tag['Value'].includes('redo')){ - console.log(volume_id, "is oraredo volume"); - let snap = await ec2CreateSnapshot2(volume_id, appname, volume_device, volume_name, date); - console.log("[+] Taking snapshot " + snap.SnapshotId); - break; - } - } - } - } - } catch (error) { - console.log(error); - } -} - -async function ec2CreateSnapshot2(volume, appname, volume_device, volume_name, date) { - console.log("Creating snapshot of volume:", volume, volume_device, volume_name, date); - let params = { - VolumeId: volume, - Description: - appname + " automatically created snapshot OUT OF BACKUPMODE and resource volume id: " + volume, - TagSpecifications: [ - { - ResourceType: "snapshot", - Tags: [ - { - Key: "Name", - Value: appname + "-" + volume_name + "-" + volume_device + "-" + date - }, - { - Key: "Application", - Value: appname - }, - { - Key: "Date", - Value: date - }, - { - Key: "dlm:snapshot-with:volume-hourly-35-day-retention", - Value: "yes" - }, - { - Key: "Created_by", - Value: "Automated OUT OF BACKUPMODE snapshot created by DBSnapshotFunction Lambda" - } - ], - }, - ], - }; - return ec2.createSnapshot(params).promise(); -} - -exports.handler = async (event, context) => { - const appname = event.appname; - try { - console.log("Putting DB into Hotbackup mode and taking snapshot"); - await invokeLambdaStart(appname); - } - catch (error) { - throw new Error(error); - } - try{ - console.log("Taking DB out of Hotbackup mode"); - await invokeLambdaStop(appname); - } catch (error) { - throw new 
Error(error); - } - ////////////////////////////////// - // Unsure why this part is required to take a second set of oraarch and oraredo snapshots, thus disabling it for now - ////////////////////////////////// - // try{ - // console.log("Operating outside of Hotbackup mode"); - // await invokeLambdaFinal(appname); - // console.log("Snapshots Complete"); - // } catch (error) { - // throw new Error(error); - // } -}; diff --git a/terraform/environments/contract-work-administration/scripts/deletesnapshots.py b/terraform/environments/contract-work-administration/scripts/deletesnapshots.py deleted file mode 100755 index e222aa8bed8..00000000000 --- a/terraform/environments/contract-work-administration/scripts/deletesnapshots.py +++ /dev/null @@ -1,27 +0,0 @@ -import boto3 -from datetime import datetime - -ec2 = boto3.client('ec2', 'eu-west-2') -paginator = ec2.get_paginator('describe_snapshots') -page_iterator = paginator.paginate(OwnerIds=['self']) - -def lambda_handler(event, context): - count = 0 - for page in page_iterator: - for snapshot in page['Snapshots']: - a = snapshot['StartTime'] - b = a.date() - c = datetime.now().date() - d = c-b - try: - if d.days > 35 and "automatically created snapshot" in snapshot['Description']: - id = snapshot['SnapshotId'] - print("Found an automatically created snapshot older than 35 days", id) - ec2.delete_snapshot(SnapshotId=id) - count += 1 - except Exception as e: - print(e) - if 'InvalidSnapshot.InUse' in str(e): - print("skipping this snapshot") - continue - print(f"Deleted a total of {count} snapshots") \ No newline at end of file diff --git a/terraform/environments/contract-work-administration/versions.tf b/terraform/environments/contract-work-administration/versions.tf index 6161ef3bc02..f54ae34948d 100644 --- a/terraform/environments/contract-work-administration/versions.tf +++ b/terraform/environments/contract-work-administration/versions.tf @@ -8,6 +8,10 @@ terraform { version = "~> 3.0" source = "hashicorp/http" } + archive = { + source = "hashicorp/archive" + version = "~> 2.0" + } } required_version = "~> 1.0" } diff --git a/terraform/environments/contract-work-administration/zipfiles/nodejs.zip b/terraform/environments/contract-work-administration/zipfiles/nodejs.zip deleted file mode 100644 index a9b62c12f7f..00000000000 Binary files a/terraform/environments/contract-work-administration/zipfiles/nodejs.zip and /dev/null differ diff --git a/terraform/environments/corporate-information-system/iam.tf b/terraform/environments/corporate-information-system/iam.tf index aa2fdb4a3b8..4a6865831b6 100644 --- a/terraform/environments/corporate-information-system/iam.tf +++ b/terraform/environments/corporate-information-system/iam.tf @@ -74,44 +74,44 @@ resource "aws_iam_role_policy" "cis_s3fs_policy" { Version = "2012-10-17" Statement = [ { - "Action": [ - "s3:*" + "Action" : [ + "s3:*" ], - "Resource": [ - "arn:aws:s3:::laa-software-bucket2", - "arn:aws:s3:::laa-software-bucket2/*", - "arn:aws:s3:::laa-software-library", - "arn:aws:s3:::laa-software-library/*", - "arn:aws:s3:::laa-cis-inbound-production", - "arn:aws:s3:::laa-cis-inbound-production/*", - "arn:aws:s3:::laa-cis-outbound-production", - "arn:aws:s3:::laa-cis-outbound-production/*", - "arn:aws:s3:::laa-ccms-outbound-production", - "arn:aws:s3:::laa-ccms-outbound-production/*", - "arn:aws:s3:::laa-ccms-inbound-production", - "arn:aws:s3:::laa-ccms-inbound-production/*" + "Resource" : [ + "arn:aws:s3:::laa-software-bucket2", + "arn:aws:s3:::laa-software-bucket2/*", + 
"arn:aws:s3:::laa-software-library", + "arn:aws:s3:::laa-software-library/*", + "arn:aws:s3:::laa-cis-inbound-production", + "arn:aws:s3:::laa-cis-inbound-production/*", + "arn:aws:s3:::laa-cis-outbound-production", + "arn:aws:s3:::laa-cis-outbound-production/*", + "arn:aws:s3:::laa-ccms-outbound-production", + "arn:aws:s3:::laa-ccms-outbound-production/*", + "arn:aws:s3:::laa-ccms-inbound-production", + "arn:aws:s3:::laa-ccms-inbound-production/*" ], - "Effect": "Allow" - }, - { - "Action": [ - "logs:CreateLogGroup", - "logs:CreateLogStream", - "logs:DescribeLogStreams", - "logs:PutRetentionPolicy", - "logs:PutLogEvents", - "ec2:DescribeInstances" - ], - "Resource": "*", - "Effect": "Allow" - }, - { - "Action": [ - "ec2:CreateTags" - ], - "Resource": "*", - "Effect": "Allow" - } + "Effect" : "Allow" + }, + { + "Action" : [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogStreams", + "logs:PutRetentionPolicy", + "logs:PutLogEvents", + "ec2:DescribeInstances" + ], + "Resource" : "*", + "Effect" : "Allow" + }, + { + "Action" : [ + "ec2:CreateTags" + ], + "Resource" : "*", + "Effect" : "Allow" + } ] }) } \ No newline at end of file diff --git a/terraform/environments/corporate-staff-rostering/iam.tf b/terraform/environments/corporate-staff-rostering/iam.tf index ab04adda575..ac1d95191f1 100644 --- a/terraform/environments/corporate-staff-rostering/iam.tf +++ b/terraform/environments/corporate-staff-rostering/iam.tf @@ -42,78 +42,3 @@ resource "aws_iam_user_policy_attachment" "mgn_attach_policy_app_migrationfull_a user = aws_iam_user.mgn_user.name policy_arn = "arn:aws:iam::aws:policy/AWSApplicationMigrationFullAccess" } - -# AD clean up lambda IAM resources - -data "aws_iam_policy_document" "lambda_assume_role_policy" { - statement { - effect = "Allow" - actions = ["sts:AssumeRole"] - - principals { - type = "Service" - identifiers = ["lambda.amazonaws.com"] - } - } -} - -resource "aws_iam_role" "lambda-ad-role" { - name = "LambdaFunctionADObjectCleanUp" - tags = local.tags - - assume_role_policy = data.aws_iam_policy_document.lambda_assume_role_policy.json -} - -resource "aws_iam_policy" "lambda-ad-policy" { - # checkov:skip=CKV_AWS_290: "Ensure IAM policies does not allow write access without constraints" - # checkov:skip=CKV_AWS_355: "Ensure no IAM policies documents allow "*" as a statement's resource for restrictable actions" - name = "LambdaADObjectCleanUpPolicy" - description = "Policy to grant AD lambda function VPC access" - - policy = jsonencode({ - Version = "2012-10-17" - Statement = [ - { - Action = [ - "logs:CreateLogGroup", - "logs:CreateLogStream", - "logs:PutLogEvents", - "ec2:CreateNetworkInterface", - "ec2:Describe*", - "ec2:DeleteNetworkInterface", - "ec2:AssignPrivateIpAddresses", - "ec2:UnassignPrivateIpAddresses" - ] - Effect = "Allow" - Resource = "*" - }, - ] - }) -} - -data "aws_iam_policy" "HmppsDomainSecrets" { - name = "HmppsDomainSecretsPolicy" -} - -data "aws_iam_policy" "BusinessUnitKmsCmk" { - name = "BusinessUnitKmsCmkPolicy" -} - -resource "aws_iam_role_policy_attachment" "lambda_secrets" { - role = aws_iam_role.lambda-ad-role.name - policy_arn = data.aws_iam_policy.HmppsDomainSecrets.arn -} - -resource "aws_iam_role_policy_attachment" "lambda_kms" { - role = aws_iam_role.lambda-ad-role.name - policy_arn = data.aws_iam_policy.BusinessUnitKmsCmk.arn -} - -resource "aws_iam_role_policy_attachment" "lambda-ad-policy-attachment" { - role = aws_iam_role.lambda-ad-role.name - policy_arn = aws_iam_policy.lambda-ad-policy.arn -} - - - - diff 
--git a/terraform/environments/corporate-staff-rostering/lambda.tf b/terraform/environments/corporate-staff-rostering/lambda.tf deleted file mode 100644 index 1e1a227dbf2..00000000000 --- a/terraform/environments/corporate-staff-rostering/lambda.tf +++ /dev/null @@ -1,64 +0,0 @@ -# START: lambda_ad_object_clean_up -locals { - lambda_ad_object_cleanup = { - function_name = "AD-Object-Clean-Up" - } -} - -module "ad-clean-up-lambda" { - #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions - # This is an internal module so commit hashes are not needed - source = "github.com/ministryofjustice/modernisation-platform-terraform-lambda-function?ref=v3.1.0" - - application_name = local.lambda_ad_object_cleanup.function_name - function_name = local.lambda_ad_object_cleanup.function_name - description = "Lambda to remove corresponding computer object from Active Directory upon server termination" - - package_type = "Zip" - filename = "${path.module}/lambda/ad-clean-up/deployment_package.zip" - source_code_hash = filebase64sha256("${path.module}/lambda/ad-clean-up/deployment_package.zip") - handler = "lambda_function.lambda_handler" - runtime = "python3.12" - timeout = 60 - - create_role = false - lambda_role = aws_iam_role.lambda-ad-role.arn - - vpc_subnet_ids = tolist(data.aws_subnets.shared-private.ids) - vpc_security_group_ids = [module.baseline.security_groups["domain"].id] - - allowed_triggers = { - Ec2StateChange = { - principal = "events.amazonaws.com" - source_arn = aws_cloudwatch_event_rule.ec2_state_change_terminated.arn - } - } - - tags = merge( - local.tags, - { - Name = "ad-object-clean-up-lambda" - }, - ) -} - -resource "aws_cloudwatch_event_rule" "ec2_state_change_terminated" { - name = "Ec2StateChangedTerminated" - description = "Rule to trigger Lambda on EC2 state change" - - event_pattern = jsonencode({ - "source" : ["aws.ec2"], - "detail-type" : ["EC2 Instance State-change Notification"], - "detail" : { - "state" : ["terminated"] - } - }) -} - -resource "aws_cloudwatch_event_target" "lambda_ad_clean_up" { - rule = aws_cloudwatch_event_rule.ec2_state_change_terminated.name - target_id = "LambdaTarget" - arn = module.ad-clean-up-lambda.lambda_function_arn -} - -# END: lambda_ad_object_clean_up diff --git a/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/.gitignore b/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/.gitignore deleted file mode 100644 index 8c4ada9706f..00000000000 --- a/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.log -ad-clean-up-lambda-payload-test.zip diff --git a/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/build-lambda-zip.sh b/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/build-lambda-zip.sh deleted file mode 100755 index ade698d613d..00000000000 --- a/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/build-lambda-zip.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash - -# This script must be executed with the Lambda's -# python source directory in lambda/ as the working -# directory ($PWD). -# The ZIP file must be committed in that same directory. -# You can test the script and resultant .zip by running: -# unzip -l deployment_package.zip | less - -readonly LOG_FILE=lambda-build-$(date "+%Y%m%dT%H%M%S").log - -exec 3>&1 4>&2 -trap 'exec 2>&4 1>&3' 0 1 2 3 -exec 1>"$LOG_FILE" 2>&1 - -readonly SOURCE_DIR="." 
-readonly LAMBDA_ZIP="deployment_package.zip" -readonly BUILD_DIR="build" -readonly VENV_DIR="venv" - -msg() { - echo "$@" >&3 -} - -dependencies=( - "python3" - "zip" -) - -for cmd in "${dependencies[@]}"; do - if ! command -v "$cmd" &>/dev/null; then - msg "Error: Required command '$cmd' is not available." - exit 1 - fi -done - -msg "Creating virtual environment..." -python3 -m venv $VENV_DIR - -msg "Activating virtual environment..." -# shellcheck disable=SC1091 -source $VENV_DIR/bin/activate - -mkdir -p $BUILD_DIR - -msg "Downloading requirements..." -pip install --requirement "$SOURCE_DIR/requirements.txt" --target $BUILD_DIR - -msg "Copying source files..." -cp "$SOURCE_DIR/requirements.txt" $BUILD_DIR/ -cp "$SOURCE_DIR"/*.py $BUILD_DIR/ - -msg "Creating ZIP file..." -(cd $BUILD_DIR && zip --recurse-paths ../$LAMBDA_ZIP ./*) - -msg "Cleaning up..." -deactivate -rm -rf $BUILD_DIR $VENV_DIR - -msg -msg "Lambda package created: $LAMBDA_ZIP" -msg "Full log: $LOG_FILE" diff --git a/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/deployment_package.zip b/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/deployment_package.zip deleted file mode 100644 index 263a890c894..00000000000 Binary files a/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/deployment_package.zip and /dev/null differ diff --git a/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/lambda_function.py b/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/lambda_function.py deleted file mode 100644 index 6bfc2634945..00000000000 --- a/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/lambda_function.py +++ /dev/null @@ -1,136 +0,0 @@ -import json -import boto3 -from ldap3 import Server, Connection, SUBTREE - - -# checks for objects within active directory -def check_ad_for_object( - hostname, domain_fqdn, domain_name, search_base, account_id, secret_suffix -): - - # create a secrets manager connection - secrets_manager = boto3.client("secretsmanager") - secret_arn = f"arn:aws:secretsmanager:eu-west-2:{account_id}:secret:/microsoft/AD/{domain_fqdn}/shared-passwords-{secret_suffix}" - print(secret_arn) - - # extract the secret value from hmpps-domain-services-test / hmpps-domain-services-prod - secret_value_response = secrets_manager.get_secret_value(SecretId=secret_arn) - secret_value = secret_value_response["SecretString"] - - # parse the JSON format secret value to get AD password - secret_json = json.loads(secret_value) - ad_password = secret_json.get("svc_join_domain") - - # domain connection details - domain_controller = Server(f"{domain_fqdn}:389") - ad_username = rf"{domain_name}\aws-lambda" - - with Connection( - Server, user=ad_username, password=ad_password, auto_bind=True - ) as conn: - ad_search = search_base - search_filter = f"(sAMAccountName={hostname})" - # subtree for recursive search through defined OU - search_result = conn.search(ad_search, search_filter, SUBTREE) - print(search_result) - - if conn.entries: - # Get the distinguished name (DN) of the found object - object_dn = conn.entries[0].entry_dn - print(object_dn) - print( - f"The object {object_dn} is present in Active Directory and will be deleted..." - ) - # conn.delete(object_dn) # action removed during testing - return 0 # success status - else: - print( - f"The terminated server object {hostname} was not found in Active Directory - no further action taken." 
- ) - return 1 # object not found status - - -# function to iterate through instance tags -def get_tag_value(tags, key): - for tag in tags: - if tag["Key"] == key: - return tag["Value"] - return None - - -# function to determine test or prod domain values to be used -def determine_domain(environment_tag): - domain_info = {} - if "development" in environment_tag.split( - "-" - ) or "test" in environment_tag.split("-"): - domain_info["domain_type"] = "dev/test" - domain_info["domain_name"] = "azure" - domain_info["domain_fqdn"] = "noms" - domain_info["search_base"] = "ou=Managed-Windows-Servers,ou=Computers,dc=azure,dc=noms,dc=root" - domain_info["account_id"] = "161282055413" # hmpps-domain-services-test - domain_info["secret_suffix"] = "HZv6pW" - elif "preproduction" in environment_tag.split( - "-" - ) or "production" in environment_tag.split("-"): - domain_info["domain_type"] = "preprod/prod" - domain_info["domain_name"] = "hmpp" - domain_info["domain_fqdn"] = "hmpp" - domain_info["search_base"] = "ou=MEMBER_SERVERS,dc=azure,dc=hmpp,dc=root" - domain_info["account_id"] = "905761223702" # hmpps-domain-services-production - domain_info["secret_suffix"] = "NLo3yC" - else: - print("Unexpected environment-name tag. Aborting lambda function...") - return None - return domain_info - - -# function to search active directory if an instance is terminated -def lambda_handler(event, context): - - if event["detail"]["state"] == "terminated": - instance_id = event["detail"]["instance-id"] - - # creates an ec2 connection for terminated instance - ec2 = boto3.client("ec2") - response = ec2.describe_instances(InstanceIds=[instance_id]) - # return the tags associated with the terminated instance - tags = response["Reservations"][0]["Instances"][0]["Tags"] - # terminated instance server-name value, same as hostname - resource_name = "server-name" - - # obtain the hostame for the terminated server - hostname = get_tag_value(tags, resource_name) - print(f"Server hostname is: {hostname}") - - # obtain terminated instance environment-name value - resource_environment = "environment-name" - environment_tag = get_tag_value(tags, resource_environment) - print(f"Server environment is: {environment_tag}") - - # determine appropriate domain variables - domain = determine_domain(environment_tag) - print(domain) - print("Server belongs to {} domain".format(domain["domain_type"])) - - # pass hostname and domain variables into AD oject deletion function - if hostname is not None and domain is not None: - check_ad_for_object( - hostname, - domain["domain_fqdn"], - domain["domain_name"], - domain["search_base"], - domain["account_id"], - domain["secret_suffix"], - ) - print(f"The Active Directory object {hostname} has been deleted.") - else: - print( - f"The '{resource_name}' tag was not found for the terminated instance." - ) - - # 200 http response lambda run successful - return { - "statusCode": 200, - "body": "Active Directory clean up complete. 
Computer object {resource_name} has been removed.", - } diff --git a/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/requirements.txt b/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/requirements.txt deleted file mode 100644 index 1aacc03f58c..00000000000 --- a/terraform/environments/corporate-staff-rostering/lambda/ad-clean-up/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -pyasn1~=0.5 -ldap3~=2.9 diff --git a/terraform/environments/corporate-staff-rostering/locals.tf b/terraform/environments/corporate-staff-rostering/locals.tf index 27bb2195852..e44f27dc5e9 100644 --- a/terraform/environments/corporate-staff-rostering/locals.tf +++ b/terraform/environments/corporate-staff-rostering/locals.tf @@ -26,6 +26,7 @@ locals { "ec2_linux", "ec2_instance_linux", "ec2_instance_oracle_db_with_backup", + "ssm_command", ] cloudwatch_metric_alarms_default_actions = ["pagerduty"] cloudwatch_metric_oam_links_ssm_parameters = ["hmpps-oem-${local.environment}"] @@ -45,6 +46,7 @@ locals { enable_s3_db_backup_bucket = true enable_s3_shared_bucket = true enable_s3_software_bucket = true + enable_ssm_command_monitoring = true s3_iam_policies = ["EC2S3BucketWriteAndDeleteAccessPolicy"] software_bucket_name = "csr-software" } diff --git a/terraform/environments/corporate-staff-rostering/locals_development.tf b/terraform/environments/corporate-staff-rostering/locals_development.tf index 42412f3af9a..d2edb5537b1 100644 --- a/terraform/environments/corporate-staff-rostering/locals_development.tf +++ b/terraform/environments/corporate-staff-rostering/locals_development.tf @@ -159,13 +159,5 @@ locals { } } } - - secretsmanager_secrets = { - "/activedirectory/devtest/aws-lambda" = { - secrets = { - passwords = { description = "active directory lambda service account" } - } - } - } } } diff --git a/terraform/environments/corporate-staff-rostering/locals_preproduction.tf b/terraform/environments/corporate-staff-rostering/locals_preproduction.tf index 086431211ae..02d691ebadc 100644 --- a/terraform/environments/corporate-staff-rostering/locals_preproduction.tf +++ b/terraform/environments/corporate-staff-rostering/locals_preproduction.tf @@ -664,32 +664,24 @@ locals { } }) http-7770 = merge(local.lbs.rxy.listeners.http-7770, { - alarm_target_group_names = ["pp-csr-w-34-7770"] - cloudwatch_metric_alarms = {} default_action = { type = "forward" target_group_name = "pp-csr-w-34-7770" } }) http-7771 = merge(local.lbs.rxy.listeners.http-7771, { - alarm_target_group_names = ["pp-csr-w-34-7771"] - cloudwatch_metric_alarms = {} default_action = { type = "forward" target_group_name = "pp-csr-w-34-7771" } }) http-7780 = merge(local.lbs.rxy.listeners.http-7780, { - alarm_target_group_names = ["pp-csr-w-34-7780"] - cloudwatch_metric_alarms = {} default_action = { type = "forward" target_group_name = "pp-csr-w-34-7780" } }) http-7781 = merge(local.lbs.rxy.listeners.http-7781, { - alarm_target_group_names = ["pp-csr-w-34-7781"] - cloudwatch_metric_alarms = {} default_action = { type = "forward" target_group_name = "pp-csr-w-34-7781" diff --git a/terraform/environments/corporate-staff-rostering/main.tf b/terraform/environments/corporate-staff-rostering/main.tf index a23f7e6d41b..d234a1e3a07 100644 --- a/terraform/environments/corporate-staff-rostering/main.tf +++ b/terraform/environments/corporate-staff-rostering/main.tf @@ -74,6 +74,7 @@ module "baseline" { ) cloudwatch_metric_alarms = merge( + module.baseline_presets.cloudwatch_metric_alarms_baseline, lookup(local.baseline_all_environments, 
"cloudwatch_metric_alarms", {}), lookup(local.baseline_environment_specific, "cloudwatch_metric_alarms", {}), ) @@ -177,6 +178,11 @@ module "baseline" { lookup(local.baseline_environment_specific, "s3_buckets", {}), ) + schedule_alarms_lambda = merge( + lookup(local.baseline_all_environments, "schedule_alarms_lambda", {}), + lookup(local.baseline_environment_specific, "schedule_alarms_lambda", {}), + ) + secretsmanager_secrets = merge( module.baseline_presets.secretsmanager_secrets, lookup(local.baseline_all_environments, "secretsmanager_secrets", {}), diff --git a/terraform/environments/delius-core/files/user_outbound_table_mapping.json b/terraform/environments/delius-core/files/user_outbound_table_mapping.json index f8a2e966334..95a33bf196b 100644 --- a/terraform/environments/delius-core/files/user_outbound_table_mapping.json +++ b/terraform/environments/delius-core/files/user_outbound_table_mapping.json @@ -21,6 +21,18 @@ }, "rule-action": "include", "filters": [] + }, + { + "rule-type": "transformation", + "rule-id": "32", + "rule-name": "remove_staff_id", + "rule-target": "column", + "object-locator": { + "schema-name": "DELIUS_APP_SCHEMA", + "table-name": "USER_", + "column-name": "STAFF_ID" + }, + "rule-action": "remove-column" } ] } diff --git a/terraform/environments/delius-core/locals_development.tf b/terraform/environments/delius-core/locals_development.tf index 5eda99ec858..9cc30fcb7c0 100644 --- a/terraform/environments/delius-core/locals_development.tf +++ b/terraform/environments/delius-core/locals_development.tf @@ -76,10 +76,11 @@ locals { delius_microservices_configs_dev = { weblogic = { - image_tag = "6.2.0.3" - container_port = 8080 - container_memory = 4096 - container_cpu = 2048 + image_tag = "6.2.0.3" + container_port = 8080 + container_memory = 4096 + container_cpu = 2048 + task_definition_revision = 9 } weblogic_eis = { @@ -129,6 +130,6 @@ locals { user_target_endpoint = { write_database = "DMDNDA" } - is-production = local.is-production + is-production = false } } diff --git a/terraform/environments/delius-core/locals_environments_all.tf b/terraform/environments/delius-core/locals_environments_all.tf index 878b62a5b17..09cda09fd84 100644 --- a/terraform/environments/delius-core/locals_environments_all.tf +++ b/terraform/environments/delius-core/locals_environments_all.tf @@ -17,7 +17,7 @@ locals { ordered_subnets = [local.ordered_subnet_ids] data_subnet_ids = data.aws_subnets.shared-data.ids data_subnet_a_id = data.aws_subnet.data_subnets_a.id - route53_inner_zone_info = data.aws_route53_zone.inner + route53_inner_zone = data.aws_route53_zone.inner route53_network_services_zone = data.aws_route53_zone.network-services route53_external_zone = data.aws_route53_zone.external shared_vpc_id = data.aws_vpc.shared.id @@ -26,7 +26,8 @@ locals { general_shared = data.aws_kms_key.general_shared.arn rds_shared = data.aws_kms_key.rds_shared.arn } - dns_suffix = "${local.application_name}.${var.networking[0].business-unit}-${local.environment}.modernisation-platform.service.justice.gov.uk" + dns_suffix = "${local.application_name}.${var.networking[0].business-unit}-${local.environment}.modernisation-platform.service.justice.gov.uk" + internal_dns_suffix = "${local.application_name}.${var.networking[0].business-unit}-${local.environment}.modernisation-platform.internal" } platform_vars = { diff --git a/terraform/environments/delius-core/locals_preproduction.tf b/terraform/environments/delius-core/locals_preproduction.tf index 813631aed0f..ab4a084988b 100644 --- 
a/terraform/environments/delius-core/locals_preproduction.tf +++ b/terraform/environments/delius-core/locals_preproduction.tf @@ -21,13 +21,13 @@ locals { encrypted = true migration_source_account_id = "010587221707" migration_lambda_role = "ldap-data-migration-lambda-role" - efs_throughput_mode = "bursting" + efs_throughput_mode = "elastic" efs_provisioned_throughput = null efs_backup_schedule = "cron(0 19 * * ? *)", efs_backup_retention_period = "30" port = 389 tls_port = 636 - desired_count = 0 + desired_count = 1 } @@ -140,7 +140,8 @@ locals { user_target_endpoint = { write_database = "PRENDA" } - is-production = local.is-production + # Auditing from the Pre-Prod environment is considered production data + is-production = true } } diff --git a/terraform/environments/delius-core/locals_stage.tf b/terraform/environments/delius-core/locals_stage.tf index c68bf61590d..f082743a0ef 100644 --- a/terraform/environments/delius-core/locals_stage.tf +++ b/terraform/environments/delius-core/locals_stage.tf @@ -140,6 +140,7 @@ locals { user_target_endpoint = { write_database = "STGNDA" } - is-production = local.is-production + # Auditing from the Stage environment is considered production data + is-production = true } } diff --git a/terraform/environments/delius-core/locals_test.tf b/terraform/environments/delius-core/locals_test.tf index d99ea8448df..6d4fcfa6497 100644 --- a/terraform/environments/delius-core/locals_test.tf +++ b/terraform/environments/delius-core/locals_test.tf @@ -128,6 +128,6 @@ locals { read_database = "TSTNDA" } user_target_endpoint = {} - is-production = local.is-production + is-production = false } } diff --git a/terraform/environments/delius-core/modules/components/dms/cloudwatch-alarms.tf b/terraform/environments/delius-core/modules/components/dms/cloudwatch-alarms.tf index 45fc07e546b..e3f21c87b96 100644 --- a/terraform/environments/delius-core/modules/components/dms/cloudwatch-alarms.tf +++ b/terraform/environments/delius-core/modules/components/dms/cloudwatch-alarms.tf @@ -1,7 +1,34 @@ # SNS topic for monitoring to send alarms to -resource "aws_sns_topic" "dms_alerting" { - name = "delius-dms-alerting" +resource "aws_sns_topic" "dms_alerts_topic" { + name = "delius-dms-alerts-topic" kms_master_key_id = var.account_config.kms_keys.general_shared + + http_success_feedback_role_arn = aws_iam_role.sns_logging_role.arn + http_success_feedback_sample_rate = 100 + http_failure_feedback_role_arn = aws_iam_role.sns_logging_role.arn +} + +resource "aws_iam_role" "sns_logging_role" { + name = "sns-logging-role" + + assume_role_policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Action" : "sts:AssumeRole", + "Principal" : { + "Service" : "sns.amazonaws.com" + }, + "Effect" : "Allow", + "Sid" : "" + } + ] + }) +} + +resource "aws_iam_role_policy_attachment" "attach_sns_policy" { + role = aws_iam_role.sns_logging_role.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonSNSRole" } # Create a map of all possible replication tasks, so those that exist may have alarms applied to them. 
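
The comment above and the hunks that follow build a map of whichever DMS replication tasks exist and point the existing CDC latency alarms at the renamed `delius-dms-alerts-topic`. As a rough illustration of that pattern outside Terraform, the boto3 sketch below enumerates the tasks that exist and attaches a source-latency alarm to each. The topic ARN, region, threshold, statistic and alarm naming are placeholders, not values taken from this change; the real alarms are managed by the Terraform here and also set the ReplicationInstanceIdentifier dimension.

```python
import boto3

# Placeholder values for illustration only - the real alarms and topic are
# managed by the Terraform in this change, not by a script like this.
ALERTS_TOPIC_ARN = "arn:aws:sns:eu-west-2:123456789012:delius-dms-alerts-topic"
LATENCY_THRESHOLD_SECONDS = 300

dms = boto3.client("dms", region_name="eu-west-2")
cloudwatch = boto3.client("cloudwatch", region_name="eu-west-2")

# Enumerate whichever replication tasks currently exist, mirroring the
# "map of all possible replication tasks" built elsewhere in this change.
for task in dms.describe_replication_tasks()["ReplicationTasks"]:
    # As the comment in the alarm resources notes, the CloudWatch dimension
    # uses the final element of the task ARN (the part after the last ":").
    task_dimension = task["ReplicationTaskArn"].split(":")[-1]
    cloudwatch.put_metric_alarm(
        AlarmName=f"dms-cdc-latency-source-{task_dimension}",
        AlarmDescription="CDC source latency is persistently high",
        Namespace="AWS/DMS",
        MetricName="CDCLatencySource",
        Dimensions=[
            {"Name": "ReplicationTaskIdentifier", "Value": task_dimension},
        ],
        Statistic="Maximum",
        Period=120,
        EvaluationPeriods=3,
        Threshold=LATENCY_THRESHOLD_SECONDS,
        ComparisonOperator="GreaterThanThreshold",
        AlarmActions=[ALERTS_TOPIC_ARN],
        OKActions=[ALERTS_TOPIC_ARN],
    )
```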
@@ -74,8 +101,8 @@ resource "aws_cloudwatch_metric_alarm" "dms_cdc_latency_source" { evaluation_periods = 3 period = 120 actions_enabled = true - alarm_actions = [aws_sns_topic.dms_alerting.arn] - ok_actions = [aws_sns_topic.dms_alerting.arn] + alarm_actions = [aws_sns_topic.dms_alerts_topic.arn] + ok_actions = [aws_sns_topic.dms_alerts_topic.arn] dimensions = { ReplicationInstanceIdentifier = aws_dms_replication_instance.dms_replication_instance.replication_instance_id # We only need to final element of the replication task ID (after the last :) @@ -96,8 +123,8 @@ resource "aws_cloudwatch_metric_alarm" "dms_cdc_latency_target" { evaluation_periods = 3 period = 120 actions_enabled = true - alarm_actions = [aws_sns_topic.dms_alerting.arn] - ok_actions = [aws_sns_topic.dms_alerting.arn] + alarm_actions = [aws_sns_topic.dms_alerts_topic.arn] + ok_actions = [aws_sns_topic.dms_alerts_topic.arn] dimensions = { ReplicationInstanceIdentifier = aws_dms_replication_instance.dms_replication_instance.replication_instance_id # We only need to final element of the replication task ID (after the last :) @@ -131,9 +158,153 @@ locals { module "pagerduty_core_alerts" { #checkov:skip=CKV_TF_1 depends_on = [ - aws_sns_topic.dms_alerting + aws_sns_topic.dms_alerts_topic ] source = "github.com/ministryofjustice/modernisation-platform-terraform-pagerduty-integration?ref=v2.0.0" - sns_topics = [aws_sns_topic.dms_alerting.name] + sns_topics = [aws_sns_topic.dms_alerts_topic.name] pagerduty_integration_key = local.pagerduty_integration_keys[local.integration_key_lookup] } + + +# Raising a Cloudwatch Alarm on a DMS Replication Task Event is not directly possible using the +# Cloudwatch Alarm Integration in PagerDuty as the JSON payload is different. Therefore, as +# workaround for this we create a custom Cloudwatch Metric which is populated by the replication event and +# create a Cloudwatch Alarm on this Metric in the usual way to allow for raising alarms. 
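
The Lambda that implements this workaround (`dms_replication_metric.py`) is added later in this change; as a condensed, hedged sketch of the pattern just described, assuming the `CustomDMSMetrics` namespace and `DMSReplicationStopped` metric used by the resources below, the publishing side boils down to:

```python
import boto3

cloudwatch = boto3.client("cloudwatch")

def publish_replication_state(source_id: str, task_stopped: bool) -> None:
    """Translate a DMS replication-task event into the 0/1 custom metric
    that the DMSReplicationStoppedAlarm resources below evaluate."""
    cloudwatch.put_metric_data(
        Namespace="CustomDMSMetrics",
        MetricData=[
            {
                "MetricName": "DMSReplicationStopped",
                "Dimensions": [{"Name": "SourceId", "Value": source_id}],
                # 1 breaches the GreaterThanThreshold(0) alarm; 0 clears it.
                "Value": 1 if task_stopped else 0,
                "Unit": "Count",
            }
        ],
    )

# Hypothetical usage: a "task stopped" event for one replication task raises
# the alarm for that task's SourceId dimension; a "task started" event resets it.
# publish_replication_state("example-replication-task", task_stopped=True)
```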
+ +# Create Role which allows Lamdba to put a custom cloudwatch metric +resource "aws_iam_role" "lambda_put_metric_data_role" { + name = "lambda-put-metric-data-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Action = "sts:AssumeRole", + Effect = "Allow", + Principal = { + Service = "lambda.amazonaws.com" + } + } + ] + }) +} + +resource "aws_iam_policy" "lambda_put_metric_data_policy" { + name = "lambda-put-metric-data-policy" + + policy = jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Effect = "Allow", + Action = [ + "cloudwatch:PutMetricData" + ], + Resource = "*" + } + ] + }) +} + +resource "aws_iam_role_policy_attachment" "lambda_put_metric_data_policy_attach" { + role = aws_iam_role.lambda_put_metric_data_role.name + policy_arn = aws_iam_policy.lambda_put_metric_data_policy.arn +} + +# Allow Cloudwatch Logging +resource "aws_iam_role_policy_attachment" "lambda_put_metric_data_logging_attach" { + role = aws_iam_role.lambda_put_metric_data_role.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" +} + +# Creates a ZIP file containing the contents of the lambda directory which +# contains a Python script to calculate and put the custom metric +data "archive_file" "lambda_dms_replication_metric_zip" { + type = "zip" + source_dir = "${path.module}/lambda" + output_path = "${path.module}/lambda/dms_replication_metric.zip" + excludes = ["dms_replication_metric.zip"] +} + +# Define a Lambda Function using the python script in the ZIP file - +# we define the namespace of the custom metric (CustomDMSMetrics) +# and the Metric Name (DMSReplication Failure). The value of this +# metric is 0 if the replication task is not stopped (normal state), +# and 1 if not (whether it has been stopped manually or has failed) +resource "aws_lambda_function" "dms_replication_metric_publisher" { + function_name = "dms-replication-metric-publisher" + role = aws_iam_role.lambda_put_metric_data_role.arn + handler = "dms_replication_metric.lambda_handler" + runtime = "python3.8" + filename = data.archive_file.lambda_dms_replication_metric_zip.output_path + source_code_hash = data.archive_file.lambda_dms_replication_metric_zip.output_base64sha256 + environment { + variables = { + METRIC_NAMESPACE = "CustomDMSMetrics", + METRIC_NAME = "DMSReplicationFailure" + } + } + + depends_on = [data.archive_file.lambda_dms_replication_metric_zip] +} + +# Set Lambda function to allow the Replication Events call this functions +resource "aws_lambda_permission" "allow_sns_invoke_dms_replication_metric_publisher_handler" { + statement_id = "AllowSNSInvoke" + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.dms_replication_metric_publisher.function_name + principal = "sns.amazonaws.com" + + source_arn = aws_sns_topic.dms_events_topic.arn +} + +resource "aws_cloudwatch_metric_alarm" "dms_replication_stopped_alarm" { + for_each = toset(local.replication_task_names) + alarm_name = "DMSReplicationStoppedAlarm_${each.key}" + alarm_description = "Alarm when Stopped Replication Task for ${each.key}" + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 1 + threshold = 0 + treat_missing_data = "ignore" + datapoints_to_alarm = 1 + namespace = "CustomDMSMetrics" + metric_name = "DMSReplicationStopped" + statistic = "Maximum" + period = "60" + + dimensions = { + SourceId = each.key + } + + alarm_actions = [aws_sns_topic.dms_alerts_topic.arn] + ok_actions = [aws_sns_topic.dms_alerts_topic.arn] +} + + + +# SNS Topic 
for DMS replication events +# This is NOT the same as for DMS Cloudwatch Alarms (dms_alerting) +# and is used to trigger the Lamda function if an event happens during +# DMS Replication (Events are NOT detected by CloudWatch Alarms) +resource "aws_sns_topic" "dms_events_topic" { + name = "delius-dms-events-topic" + + lambda_success_feedback_role_arn = aws_iam_role.sns_logging_role.arn + lambda_success_feedback_sample_rate = 100 + lambda_failure_feedback_role_arn = aws_iam_role.sns_logging_role.arn +} + +resource "aws_sns_topic_subscription" "dms_events_lambda_subscription" { + topic_arn = aws_sns_topic.dms_events_topic.arn + protocol = "lambda" + endpoint = aws_lambda_function.dms_replication_metric_publisher.arn +} + +# We handle State Change and Failure DMS Events +resource "aws_dms_event_subscription" "dms_task_event_subscription" { + name = "dms-task-event-alerts" + sns_topic_arn = aws_sns_topic.dms_events_topic.arn + source_type = "replication-task" + event_categories = ["state change", "failure"] + enabled = true +} \ No newline at end of file diff --git a/terraform/environments/delius-core/modules/components/dms/dms_db_source_endpoints.tf b/terraform/environments/delius-core/modules/components/dms/dms_db_source_endpoints.tf index 23d86e73ca7..b9595237945 100644 --- a/terraform/environments/delius-core/modules/components/dms/dms_db_source_endpoints.tf +++ b/terraform/environments/delius-core/modules/components/dms/dms_db_source_endpoints.tf @@ -13,9 +13,9 @@ resource "aws_dms_endpoint" "dms_audit_source_endpoint_db" { engine_name = "oracle" username = local.dms_audit_username password = join(",", [jsondecode(data.aws_secretsmanager_secret_version.delius_core_application_passwords.secret_string)[local.dms_audit_username], jsondecode(data.aws_secretsmanager_secret_version.delius_core_application_passwords.secret_string)[local.dms_audit_username]]) - server_name = join(".", [var.oracle_db_server_names[var.dms_config.audit_source_endpoint.read_host], var.account_config.route53_inner_zone_info.name]) + server_name = join(".", [var.oracle_db_server_names[var.dms_config.audit_source_endpoint.read_host], var.account_config.route53_inner_zone.name]) port = local.db_tcps_port - extra_connection_attributes = "ArchivedLogDestId=1;AdditionalArchivedLogDestId=32;asm_server=${join(".", [var.oracle_db_server_names[var.dms_config.audit_source_endpoint.read_host], var.account_config.route53_inner_zone_info.name])}:${local.db_tcps_port}/+ASM;asm_user=${local.dms_audit_username};UseBFile=true;UseLogminerReader=false;" + extra_connection_attributes = "ArchivedLogDestId=1;AdditionalArchivedLogDestId=32;asm_server=${join(".", [var.oracle_db_server_names[var.dms_config.audit_source_endpoint.read_host], var.account_config.route53_inner_zone.name])}:${local.db_tcps_port}/+ASM;asm_user=${local.dms_audit_username};UseBFile=true;UseLogminerReader=false;" # We initially use an empty wallet for encryption - a populated wallet will be added by DMS configuration ssl_mode = "verify-ca" certificate_arn = aws_dms_certificate.empty_oracle_wallet.certificate_arn @@ -37,9 +37,9 @@ resource "aws_dms_endpoint" "dms_user_source_endpoint_db" { engine_name = "oracle" username = local.dms_audit_username password = join(",", [jsondecode(data.aws_secretsmanager_secret_version.delius_core_application_passwords.secret_string)[local.dms_audit_username], jsondecode(data.aws_secretsmanager_secret_version.delius_core_application_passwords.secret_string)[local.dms_audit_username]]) - server_name = join(".", 
[var.oracle_db_server_names[var.dms_config.user_source_endpoint.read_host], var.account_config.route53_inner_zone_info.name]) + server_name = join(".", [var.oracle_db_server_names[var.dms_config.user_source_endpoint.read_host], var.account_config.route53_inner_zone.name]) port = local.db_tcps_port - extra_connection_attributes = "ArchivedLogDestId=1;AdditionalArchivedLogDestId=32;asm_server=${join(".", [var.oracle_db_server_names[var.dms_config.user_source_endpoint.read_host], var.account_config.route53_inner_zone_info.name])}:${local.db_tcps_port}/+ASM;asm_user=${local.dms_audit_username};UseBFile=true;UseLogminerReader=false;" + extra_connection_attributes = "ArchivedLogDestId=1;AdditionalArchivedLogDestId=32;asm_server=${join(".", [var.oracle_db_server_names[var.dms_config.user_source_endpoint.read_host], var.account_config.route53_inner_zone.name])}:${local.db_tcps_port}/+ASM;asm_user=${local.dms_audit_username};UseBFile=true;UseLogminerReader=false;" # We initially use an empty wallet for encryption - a populated wallet will be added by DMS configuration ssl_mode = "verify-ca" certificate_arn = aws_dms_certificate.empty_oracle_wallet.certificate_arn diff --git a/terraform/environments/delius-core/modules/components/dms/dms_db_target_endpoints.tf b/terraform/environments/delius-core/modules/components/dms/dms_db_target_endpoints.tf index 8610fea4df1..4a963e03ed0 100644 --- a/terraform/environments/delius-core/modules/components/dms/dms_db_target_endpoints.tf +++ b/terraform/environments/delius-core/modules/components/dms/dms_db_target_endpoints.tf @@ -9,9 +9,9 @@ resource "aws_dms_endpoint" "dms_user_target_endpoint_db" { engine_name = "oracle" username = local.dms_audit_username password = join(",", [jsondecode(data.aws_secretsmanager_secret_version.delius_core_application_passwords.secret_string)[local.dms_audit_username], jsondecode(data.aws_secretsmanager_secret_version.delius_core_application_passwords.secret_string)[local.dms_audit_username]]) - server_name = join(".", [var.oracle_db_server_names["primarydb"], var.account_config.route53_inner_zone_info.name]) + server_name = join(".", [var.oracle_db_server_names["primarydb"], var.account_config.route53_inner_zone.name]) port = local.db_tcps_port - extra_connection_attributes = "UseDirectPathFullLoad=false;ArchivedLogDestId=1;AdditionalArchivedLogDestId=32;asm_server=${join(".", [var.oracle_db_server_names["primarydb"], var.account_config.route53_inner_zone_info.name])}:${local.db_tcps_port}/+ASM;asm_user=${local.dms_audit_username};UseBFile=true;UseLogminerReader=false;" + extra_connection_attributes = "UseDirectPathFullLoad=false;ArchivedLogDestId=1;AdditionalArchivedLogDestId=32;asm_server=${join(".", [var.oracle_db_server_names["primarydb"], var.account_config.route53_inner_zone.name])}:${local.db_tcps_port}/+ASM;asm_user=${local.dms_audit_username};UseBFile=true;UseLogminerReader=false;" # We initially use an empty wallet for encryption - a populated wallet will be added by DMS configuration ssl_mode = "verify-ca" certificate_arn = aws_dms_certificate.empty_oracle_wallet.certificate_arn @@ -32,9 +32,9 @@ resource "aws_dms_endpoint" "dms_audit_target_endpoint_db" { engine_name = "oracle" username = local.dms_audit_username password = join(",", [jsondecode(data.aws_secretsmanager_secret_version.delius_core_application_passwords.secret_string)[local.dms_audit_username], jsondecode(data.aws_secretsmanager_secret_version.delius_core_application_passwords.secret_string)[local.dms_audit_username]]) - server_name = join(".", 
[var.oracle_db_server_names["primarydb"], var.account_config.route53_inner_zone_info.name]) + server_name = join(".", [var.oracle_db_server_names["primarydb"], var.account_config.route53_inner_zone.name]) port = local.db_tcps_port - extra_connection_attributes = "UseDirectPathFullLoad=false;ArchivedLogDestId=1;AdditionalArchivedLogDestId=32;asm_server=${join(".", [var.oracle_db_server_names["primarydb"], var.account_config.route53_inner_zone_info.name])}:${local.db_tcps_port}/+ASM;asm_user=${local.dms_audit_username};UseBFile=true;UseLogminerReader=false;" + extra_connection_attributes = "UseDirectPathFullLoad=false;ArchivedLogDestId=1;AdditionalArchivedLogDestId=32;asm_server=${join(".", [var.oracle_db_server_names["primarydb"], var.account_config.route53_inner_zone.name])}:${local.db_tcps_port}/+ASM;asm_user=${local.dms_audit_username};UseBFile=true;UseLogminerReader=false;" # We initially use an empty wallet for encryption - a populated wallet will be added by DMS configuration ssl_mode = "verify-ca" certificate_arn = aws_dms_certificate.empty_oracle_wallet.certificate_arn @@ -43,4 +43,4 @@ resource "aws_dms_endpoint" "dms_audit_target_endpoint_db" { ignore_changes = [certificate_arn] } depends_on = [aws_dms_certificate.empty_oracle_wallet] -} \ No newline at end of file +} diff --git a/terraform/environments/delius-core/modules/components/dms/lambda/dms_replication_metric.py b/terraform/environments/delius-core/modules/components/dms/lambda/dms_replication_metric.py new file mode 100644 index 00000000000..80195163a82 --- /dev/null +++ b/terraform/environments/delius-core/modules/components/dms/lambda/dms_replication_metric.py @@ -0,0 +1,69 @@ +import boto3 +import json +import logging +import re + +logger = logging.getLogger() +logger.setLevel(logging.INFO) + +def lambda_handler(event, context): + + cloudwatch = boto3.client('cloudwatch') + for record in event['Records']: + + message = json.loads(record['Sns']['Message']) + logger.info("SNS Message: %s",message) + + event_message = message.get("Event Message") + event_source = message.get("Event Source") + source_id = message.get("SourceId") + + dms_event_id = re.search(r"#(DMS-EVENT-\d+) $",message.get("Event ID")) + + # DMS Event IDs are documented at https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Events.html + # + # Those relevant for this metric are: + # + # Running Replication: + # DMS-EVENT-0069: The replication task has started. + # DMS-EVENT-0081: A reload of table details has been requested. + # DMS-EVENT-0093: Reading resumed. + running_replication = ["DMS-EVENT-0069","DMS-EVENT-0081","DMS-EVENT-0093"] + # + # Stopped Replication: + # DMS-EVENT-0079: The replication task has stopped. + # DMS-EVENT-0091: Reading paused, swap files limit reached. + # DMS-EVENT-0092: Reading paused, disk usage limit reached. + # DMS-EVENT-0078: A replication task has failed. 
+ stopped_replication = ["DMS-EVENT-0079","DMS-EVENT-0091","DMS-EVENT-0092","DMS-EVENT-0078"] + + if dms_event_id.group(1) in running_replication: + logger.info("TASK START: " + event_source + " task " + source_id + " started") + cloudwatch.put_metric_data( + Namespace='CustomDMSMetrics', + MetricData=[ + { + 'MetricName': 'DMSReplicationStopped', + 'Dimensions': [ + {'Name': 'SourceId', 'Value': source_id} + ], + 'Value': 0, # Reset Below Trigger threshold (Task Started) + 'Unit': 'Count' + } + ] + ) + elif dms_event_id.group(1) in stopped_replication: + logger.info("TASK STOPPED: " + event_source + " task " + source_id + " stopped") + cloudwatch.put_metric_data( + Namespace='CustomDMSMetrics', + MetricData=[ + { + 'MetricName': 'DMSReplicationStopped', + 'Dimensions': [ + {'Name': 'SourceId', 'Value': source_id} + ], + 'Value': 1, # Trigger threshold (Task Failed) + 'Unit': 'Count' + } + ] + ) \ No newline at end of file diff --git a/terraform/environments/delius-core/modules/components/dms/locals.tf b/terraform/environments/delius-core/modules/components/dms/locals.tf index 7d7b9f0a42a..4953e696009 100644 --- a/terraform/environments/delius-core/modules/components/dms/locals.tf +++ b/terraform/environments/delius-core/modules/components/dms/locals.tf @@ -41,4 +41,15 @@ locals { dms_s3_writer_role_name = "${var.env_name}-dms-s3-writer-role" dms_s3_reader_role_name = "${var.env_name}-dms-s3-reader-role" + replication_task_names = concat( + try([aws_dms_replication_task.user_inbound_replication[0].replication_task_id], []), + try([aws_dms_replication_task.business_interaction_inbound_replication[0].replication_task_id], []), + try([aws_dms_replication_task.audited_interaction_inbound_replication[0].replication_task_id], []), + try([aws_dms_replication_task.audited_interaction_checksum_inbound_replication[0].replication_task_id], []), + try([aws_dms_replication_task.audited_interaction_outbound_replication[0].replication_task_id], []), + try([aws_dms_replication_task.business_interaction_outbound_replication[0].replication_task_id], []), + try([aws_dms_replication_task.audited_interaction_outbound_replication[0].replication_task_id], []), + try([aws_dms_replication_task.audited_interaction_checksum_outbound_replication[0].replication_task_id], []) + ) + } \ No newline at end of file diff --git a/terraform/environments/delius-core/modules/components/oracle_db_instance/dns.tf b/terraform/environments/delius-core/modules/components/oracle_db_instance/dns.tf index 4db68e76596..0a7b0ea8f94 100644 --- a/terraform/environments/delius-core/modules/components/oracle_db_instance/dns.tf +++ b/terraform/environments/delius-core/modules/components/oracle_db_instance/dns.tf @@ -1,7 +1,7 @@ resource "aws_route53_record" "db_ec2_instance_internal" { provider = aws.core-vpc - zone_id = var.account_config.route53_inner_zone_info.zone_id - name = var.db_type == "primary" ? "${var.account_info.application_name}-${var.env_name}-${var.db_suffix}-${var.db_count_index}.${var.account_config.route53_inner_zone_info.name}" : "${var.account_info.application_name}-${var.env_name}-${var.db_suffix}-${var.db_count_index + 1}.${var.account_config.route53_inner_zone_info.name}" + zone_id = var.account_config.route53_inner_zone.zone_id + name = var.db_type == "primary" ? 
"${var.account_info.application_name}-${var.env_name}-${var.db_suffix}-${var.db_count_index}.${var.account_config.route53_inner_zone.name}" : "${var.account_info.application_name}-${var.env_name}-${var.db_suffix}-${var.db_count_index + 1}.${var.account_config.route53_inner_zone.name}" type = "CNAME" ttl = 60 records = [module.instance.aws_instance.private_dns] diff --git a/terraform/environments/delius-core/modules/delius_environment/alb_frontend.tf b/terraform/environments/delius-core/modules/delius_environment/alb_frontend.tf index 2a68a0c40f6..af5a02ea15d 100644 --- a/terraform/environments/delius-core/modules/delius_environment/alb_frontend.tf +++ b/terraform/environments/delius-core/modules/delius_environment/alb_frontend.tf @@ -17,15 +17,6 @@ resource "aws_vpc_security_group_ingress_rule" "delius_core_frontend_alb_ingress cidr_ipv4 = "81.134.202.29/32" # MoJ Digital VPN } -resource "aws_vpc_security_group_ingress_rule" "delius_core_frontend_alb_ingress_http_allowlist" { - security_group_id = aws_security_group.delius_frontend_alb_security_group.id - description = "access into delius core frontend alb over http (will redirect)" - from_port = "80" - to_port = "80" - ip_protocol = "tcp" - cidr_ipv4 = "81.134.202.29/32" # MoJ Digital VPN -} - resource "aws_vpc_security_group_ingress_rule" "delius_core_frontend_alb_ingress_https_global_protect_allowlist" { for_each = toset(local.moj_ips) security_group_id = aws_security_group.delius_frontend_alb_security_group.id @@ -36,15 +27,12 @@ resource "aws_vpc_security_group_ingress_rule" "delius_core_frontend_alb_ingress cidr_ipv4 = each.key # Global Protect VPN } -# resource "aws_vpc_security_group_ingress_rule" "delius_core_frontend_alb_ingress_http_global_protect_allowlist" { -# for_each = toset(local.moj_ips) -# security_group_id = aws_security_group.delius_frontend_alb_security_group.id -# description = "access into delius core frontend alb over http (will redirect)" -# from_port = "80" -# to_port = "80" -# ip_protocol = "tcp" -# cidr_ipv4 = each.key # Global Protect VPN -# } +resource "aws_vpc_security_group_egress_rule" "delius_core_frontend_alb_egress_to_ecs_cluster" { + security_group_id = aws_security_group.delius_frontend_alb_security_group.id + description = "egress from delius core frontend alb to ecs cluster" + ip_protocol = "-1" + referenced_security_group_id = aws_security_group.cluster.id +} # tfsec:ignore:aws-elb-alb-not-public resource "aws_lb" "delius_core_frontend" { @@ -152,4 +140,3 @@ resource "aws_lb_listener_rule" "blocked_paths_listener_rule" { } } } - diff --git a/terraform/environments/delius-core/modules/delius_environment/alfresco.tf b/terraform/environments/delius-core/modules/delius_environment/alfresco.tf new file mode 100644 index 00000000000..c0024d20346 --- /dev/null +++ b/terraform/environments/delius-core/modules/delius_environment/alfresco.tf @@ -0,0 +1,264 @@ +module "alfresco_efs" { + source = "../helpers/efs" + + name = "alfresco" + env_name = var.env_name + creation_token = "${var.env_name}-sfs" + + kms_key_arn = var.account_config.kms_keys.general_shared + throughput_mode = "elastic" + provisioned_throughput_in_mibps = null + tags = var.tags + enable_platform_backups = false + + vpc_id = var.account_config.shared_vpc_id + subnet_ids = var.account_config.private_subnet_ids + vpc_cidr = var.account_config.shared_vpc_cidr + account_info = var.account_info +} + + +module "alfresco_sfs_ecs" { + source = "../helpers/delius_microservice" + + name = "alfresco-sfs" + env_name = var.env_name + + container_cpu = 
2048 + container_memory = 4096 + + container_vars_default = { + "scheduler.content.age.millis" = 518400000 # 6 days + "scheduler.cleanup.interval" = 259200000 # 3 days + } + + container_vars_env_specific = {} + + container_secrets_default = {} + container_secrets_env_specific = {} + + desired_count = 1 + deployment_minimum_healthy_percent = 0 + deployment_maximum_percent = 200 + + container_port_config = [ + { + containerPort = 8099 + protocol = "tcp" + } + ] + + microservice_lb = aws_lb.alfresco_sfs + microservice_lb_https_listener_arn = aws_lb_listener.alfresco_sfs_listener_https.arn + + alb_listener_rule_host_header = "alf-sfs.${var.env_name}.${var.account_config.dns_suffix}" + + target_group_protocol_version = "HTTP1" + + + alb_health_check = { + path = "/" + healthy_threshold = 5 + interval = 30 + protocol = "HTTP" + unhealthy_threshold = 5 + matcher = "200-499" + timeout = 10 + grace_period_seconds = 180 + } + + ecs_cluster_arn = module.ecs.ecs_cluster_arn + cluster_security_group_id = aws_security_group.cluster.id + + bastion_sg_id = module.bastion_linux.bastion_security_group + tags = var.tags + + platform_vars = var.platform_vars + container_image = "ghcr.io/ministryofjustice/hmpps-delius-alfresco-shared-file-store:2.1.2-4" + account_config = var.account_config + + account_info = var.account_info + + ignore_changes_service_task_definition = true + + extra_task_exec_role_policies = { + efs = data.aws_iam_policy_document.alfresco_efs_access_policy + } + + providers = { + aws.core-vpc = aws.core-vpc + aws.core-network-services = aws.core-network-services + } + + log_error_pattern = "%${join("|", local.ldap_formatted_error_codes)}%" + sns_topic_arn = aws_sns_topic.delius_core_alarms.arn + enable_platform_backups = false + frontend_lb_arn_suffix = aws_lb.alfresco_sfs.arn_suffix + + efs_volumes = [ + { + host_path = null + name = "sfs" + efs_volume_configuration = [{ + file_system_id = module.alfresco_efs.fs_id + root_directory = "/" + transit_encryption = "ENABLED" + transit_encryption_port = 2049 + authorization_config = [{ + access_point_id = module.alfresco_efs.access_point_id + iam = "DISABLED" + }] + }] + } + ] + + mount_points = [{ + sourceVolume = "sfs" + containerPath = "/tmp/Alfresco" + readOnly = false + }] + + ecs_service_egress_security_group_ids = [ + { + ip_protocol = "-1" + cidr_ipv4 = "0.0.0.0/0" + description = "Allow all outbound traffic to any IPv4 address" + } + ] + + nlb_ingress_security_group_ids = [ + { + port = 8099 + ip_protocol = "tcp" + cidr_ipv4 = var.account_config.shared_vpc_cidr + description = "Allow inbound traffic from VPC" + }, + { + port = 8099 + ip_protocol = "udp" + cidr_ipv4 = var.account_config.shared_vpc_cidr + description = "Allow inbound traffic from VPC" + }, + { + port = 8099 + ip_protocol = "tcp" + cidr_ipv4 = var.account_info.cp_cidr + description = "Allow inbound LDAP traffic from CP" + }, + { + port = 8099 + ip_protocol = "udp" + cidr_ipv4 = var.account_info.cp_cidr + description = "Allow inbound LDAP traffic from CP" + }, + { + port = 2049 + ip_protocol = "tcp" + referenced_security_group_id = module.ldap.efs_sg_id + description = "EFS ingress" + } + ] + + ecs_service_ingress_security_group_ids = [ + { + port = 8099 + ip_protocol = "tcp" + cidr_ipv4 = var.account_config.shared_vpc_cidr + description = "Allow inbound traffic from VPC" + }, + { + port = 8099 + ip_protocol = "udp" + cidr_ipv4 = var.account_config.shared_vpc_cidr + description = "Allow inbound traffic from VPC" + }, + { + port = 8099 + ip_protocol = "tcp" + cidr_ipv4 = 
var.account_info.cp_cidr + description = "Allow inbound web traffic from CP" + }, + { + port = 8099 + ip_protocol = "udp" + cidr_ipv4 = var.account_info.cp_cidr + description = "Allow inbound web traffic from CP" + }, + { + port = 2049 + ip_protocol = "tcp" + referenced_security_group_id = module.ldap.efs_sg_id + description = "EFS ingress" + } + ] +} + +data "aws_iam_policy_document" "alfresco_efs_access_policy" { + statement { + actions = [ + "elasticfilesystem:ClientRootAccess", + "elasticfilesystem:ClientWrite", + "elasticfilesystem:ClientMount" + ] + resources = [ + module.ldap.efs_fs_arn + ] + effect = "Allow" + } +} + +resource "aws_security_group" "alfresco_sfs_alb" { + name = "${var.env_name}-alf-sfs-alb" + description = "controls access to and from alfresco sfs load balancer" + vpc_id = var.account_config.shared_vpc_id + tags = local.tags + lifecycle { + create_before_destroy = true + } +} + +resource "aws_vpc_security_group_ingress_rule" "alfresco_sfs_alb" { + for_each = toset([var.account_info.cp_cidr, var.account_config.shared_vpc_cidr]) + security_group_id = aws_security_group.alfresco_sfs_alb.id + description = "Access into alb over https" + from_port = "443" + to_port = "443" + ip_protocol = "tcp" + cidr_ipv4 = each.key +} + +resource "aws_vpc_security_group_egress_rule" "alfresco_sfs_alb" { + security_group_id = aws_security_group.alfresco_sfs_alb.id + description = "egress from alb to ecs cluster" + ip_protocol = "-1" + cidr_ipv4 = var.account_config.shared_vpc_cidr +} + +# internal application load balancer +resource "aws_lb" "alfresco_sfs" { + name = "${var.app_name}-${var.env_name}-alf-sfs-alb" + internal = true + load_balancer_type = "application" + security_groups = [aws_security_group.alfresco_sfs_alb.id] + subnets = var.account_config.private_subnet_ids + + enable_deletion_protection = false + drop_invalid_header_fields = true +} + + +resource "aws_lb_listener" "alfresco_sfs_listener_https" { + load_balancer_arn = aws_lb.alfresco_sfs.id + port = 443 + protocol = "HTTPS" + certificate_arn = local.certificate_arn + ssl_policy = "ELBSecurityPolicy-TLS-1-2-2017-01" + + default_action { + type = "fixed-response" + fixed_response { + content_type = "text/plain" + status_code = "404" + } + } +} diff --git a/terraform/environments/delius-core/modules/delius_environment/db_ec2.tf b/terraform/environments/delius-core/modules/delius_environment/db_ec2.tf index b9083d57c68..6aa6ceca16a 100644 --- a/terraform/environments/delius-core/modules/delius_environment/db_ec2.tf +++ b/terraform/environments/delius-core/modules/delius_environment/db_ec2.tf @@ -140,8 +140,8 @@ # for item in var.db_config : item.name => item # } # provider = aws.core-vpc -# zone_id = var.account_config.route53_inner_zone_info.zone_id -# name = each.key == "primary-db" ? "delius-${var.env_name}-db-${index(var.db_config, each.value) + 1}.${var.account_config.route53_inner_zone_info.name}" : "delius-${var.env_name}-db-${index(var.db_config, each.value) + 1}.${var.account_config.route53_inner_zone_info.name}" +# zone_id = var.account_config.route53_inner_zone.zone_id +# name = each.key == "primary-db" ? 
"delius-${var.env_name}-db-${index(var.db_config, each.value) + 1}.${var.account_config.route53_inner_zone.name}" : "delius-${var.env_name}-db-${index(var.db_config, each.value) + 1}.${var.account_config.route53_inner_zone.name}" # type = "CNAME" # ttl = 300 # records = [aws_instance.db_ec2_instance[each.key].private_dns] @@ -350,8 +350,8 @@ # for item in var.db_config : item.name => item # } # provider = aws.core-vpc -# zone_id = var.account_config.route53_inner_zone_info.zone_id -# name = each.key == "primary-db" ? "delius-${var.env_name}-db-${index(var.db_config, each.value) + 1}.${var.account_config.route53_inner_zone_info.name}" : "delius-${var.env_name}-db-${index(var.db_config, each.value) + 1}.${var.account_config.route53_inner_zone_info.name}" +# zone_id = var.account_config.route53_inner_zone.zone_id +# name = each.key == "primary-db" ? "delius-${var.env_name}-db-${index(var.db_config, each.value) + 1}.${var.account_config.route53_inner_zone.name}" : "delius-${var.env_name}-db-${index(var.db_config, each.value) + 1}.${var.account_config.route53_inner_zone.name}" # type = "CNAME" # ttl = 300 # records = [aws_instance.db_ec2_instance[each.key].private_dns] diff --git a/terraform/environments/delius-core/modules/delius_environment/delius_frontend_alb.tf b/terraform/environments/delius-core/modules/delius_environment/delius_frontend_alb.tf deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/terraform/environments/delius-core/modules/delius_environment/ldap_ecs.tf b/terraform/environments/delius-core/modules/delius_environment/ldap_ecs.tf index 27ccc657d48..b143dfe5e30 100644 --- a/terraform/environments/delius-core/modules/delius_environment/ldap_ecs.tf +++ b/terraform/environments/delius-core/modules/delius_environment/ldap_ecs.tf @@ -51,7 +51,7 @@ module "ldap_ecs" { container_image = "${var.platform_vars.environment_management.account_ids["core-shared-services-production"]}.dkr.ecr.eu-west-2.amazonaws.com/delius-core-openldap-ecr-repo:${var.delius_microservice_configs.ldap.image_tag}" account_config = var.account_config - health_check = { + container_health_check = { command = ["CMD-SHELL", "ldapsearch -x -H ldap://localhost:389 -b '' -s base '(objectclass=*)' namingContexts"] interval = 30 retries = 3 @@ -60,8 +60,7 @@ module "ldap_ecs" { } account_info = var.account_info - ignore_changes_service_task_definition = false - force_new_deployment = false + ignore_changes_service_task_definition = true extra_task_exec_role_policies = { efs = data.aws_iam_policy_document.ldap_efs_access_policy diff --git a/terraform/environments/delius-core/modules/delius_environment/pwm.tf b/terraform/environments/delius-core/modules/delius_environment/pwm.tf index 3509f9b3016..c5c6164fcbf 100644 --- a/terraform/environments/delius-core/modules/delius_environment/pwm.tf +++ b/terraform/environments/delius-core/modules/delius_environment/pwm.tf @@ -54,14 +54,22 @@ module "pwm" { platform_vars = var.platform_vars - container_image = "${var.platform_vars.environment_management.account_ids["core-shared-services-production"]}.dkr.ecr.eu-west-2.amazonaws.com/delius-core-password-management:${var.delius_microservice_configs.pwm.image_tag}" - account_config = var.account_config - health_check_path = "/" - health_check_interval = "15" - account_info = var.account_info - - target_group_protocol_version = "HTTP1" - health_check_grace_period_seconds = 10 + container_image = 
"${var.platform_vars.environment_management.account_ids["core-shared-services-production"]}.dkr.ecr.eu-west-2.amazonaws.com/delius-core-password-management:${var.delius_microservice_configs.pwm.image_tag}" + account_config = var.account_config + account_info = var.account_info + + target_group_protocol_version = "HTTP1" + + alb_health_check = { + path = "/" + healthy_threshold = 5 + interval = 30 + protocol = "HTTP" + unhealthy_threshold = 5 + matcher = "200-499" + timeout = 10 + grace_period_seconds = 180 + } container_cpu = var.delius_microservice_configs.pwm.container_cpu container_memory = var.delius_microservice_configs.pwm.container_memory @@ -72,6 +80,7 @@ module "pwm" { container_secrets_default = { "CONFIG_PASSWORD" : nonsensitive(aws_ssm_parameter.delius_core_pwm_config_password.arn), "LDAP_PASSWORD" : nonsensitive(aws_ssm_parameter.ldap_admin_password.arn), + "SECURITY_KEY" : nonsensitive(aws_ssm_parameter.security_key.arn), "SES_JSON" : nonsensitive(aws_ssm_parameter.pwm_ses_smtp_user.arn) } @@ -85,13 +94,11 @@ module "pwm" { email_from_address = "no-reply@${aws_ses_domain_identity.pwm.domain}" email_smtp_address = "email-smtp.eu-west-2.amazonaws.com" })), - "SECURITY_KEY" = "${base64encode(uuid())}", - "JAVA_OPTS" = "-Xmx${floor(var.delius_microservice_configs.pwm.container_memory * 0.75)}m -Xms${floor(var.delius_microservice_configs.pwm.container_memory * 0.25)}m" + "JAVA_OPTS" = "-Xmx${floor(var.delius_microservice_configs.pwm.container_memory * 0.75)}m -Xms${floor(var.delius_microservice_configs.pwm.container_memory * 0.25)}m" } container_vars_env_specific = try(var.delius_microservice_configs.pwm.container_vars_env_specific, {}) - ignore_changes_service_task_definition = false - force_new_deployment = false + ignore_changes_service_task_definition = true providers = { aws.core-vpc = aws.core-vpc @@ -120,8 +127,18 @@ module "pwm" { enable_platform_backups = var.enable_platform_backups } +resource "aws_ssm_parameter" "security_key" { + name = "/${var.env_name}/pwm/security_key" + type = "SecureString" + value = random_id.security_key.hex +} - +resource "random_id" "security_key" { + keepers = { + image_tag = var.delius_microservice_configs.pwm.image_tag + } + byte_length = 32 +} ############# # SES diff --git a/terraform/environments/delius-core/modules/delius_environment/weblogic.tf b/terraform/environments/delius-core/modules/delius_environment/weblogic.tf index 658c0e24bdb..2d4314603c2 100644 --- a/terraform/environments/delius-core/modules/delius_environment/weblogic.tf +++ b/terraform/environments/delius-core/modules/delius_environment/weblogic.tf @@ -19,8 +19,22 @@ module "weblogic" { ecs_cluster_arn = module.ecs.ecs_cluster_arn env_name = var.env_name - health_check_path = "/NDelius-war/delius/JSP/healthcheck.jsp?ping" - microservice_lb = aws_lb.delius_core_frontend + pin_task_definition_revision = try(var.delius_microservice_configs.weblogic.task_definition_revision, 0) + + alb_health_check = { + path = "/NDelius-war/delius/JSP/healthcheck.jsp?ping" + healthy_threshold = 5 + interval = 30 + protocol = "HTTP" + unhealthy_threshold = 5 + matcher = "200-499" + timeout = 5 + grace_period_seconds = 300 + } + + microservice_lb = aws_lb.delius_core_frontend + + target_group_protocol_version = "HTTP1" name = "weblogic" container_image = "${var.platform_vars.environment_management.account_ids["core-shared-services-production"]}.dkr.ecr.eu-west-2.amazonaws.com/delius-core-weblogic:${var.delius_microservice_configs.weblogic.image_tag}" @@ -28,10 +42,33 @@ module "weblogic" { 
tags = var.tags db_ingress_security_groups = [] + container_cpu = var.delius_microservice_configs.weblogic.container_cpu + container_memory = var.delius_microservice_configs.weblogic.container_memory + deployment_maximum_percent = 200 + deployment_minimum_healthy_percent = 100 + + ecs_service_ingress_security_group_ids = [] + ecs_service_egress_security_group_ids = [ + { + ip_protocol = "tcp" + port = 389 + cidr_ipv4 = var.account_config.shared_vpc_cidr + }, + { + ip_protocol = "udp" + port = 389 + cidr_ipv4 = var.account_config.shared_vpc_cidr + }, + { + ip_protocol = "tcp" + port = 1521 + cidr_ipv4 = var.account_config.shared_vpc_cidr + } + ] + cluster_security_group_id = aws_security_group.cluster.id ignore_changes_service_task_definition = false - force_new_deployment = false providers = { aws.core-vpc = aws.core-vpc @@ -44,14 +81,14 @@ module "weblogic" { bastion_sg_id = module.bastion_linux.bastion_security_group - - container_vars_default = { for name in local.weblogic_ssm.vars : name => data.aws_ssm_parameter.weblogic_ssm[name].value } - container_secrets_default = { + container_secrets_default = merge({ for name in local.weblogic_ssm.secrets : name => module.weblogic_ssm.arn_map[name] - } - + }, { + "JDBC_PASSWORD" = "${module.oracle_db_shared.database_application_passwords_secret_arn}:delius_pool::" + } + ) } diff --git a/terraform/environments/delius-core/modules/delius_environment/weblogic_eis.tf b/terraform/environments/delius-core/modules/delius_environment/weblogic_eis.tf index 503c863ff9b..ce08ba24080 100644 --- a/terraform/environments/delius-core/modules/delius_environment/weblogic_eis.tf +++ b/terraform/environments/delius-core/modules/delius_environment/weblogic_eis.tf @@ -68,9 +68,16 @@ module "weblogic_eis" { container_memory = var.delius_microservice_configs.weblogic_eis.container_memory container_cpu = var.delius_microservice_configs.weblogic_eis.container_cpu - health_check_path = "/NDelius-war/delius/JSP/healthcheck.jsp?ping" - health_check_grace_period_seconds = 600 - health_check_interval = 30 + alb_health_check = { + path = "/NDelius-war/delius/JSP/healthcheck.jsp?ping" + healthy_threshold = 5 + interval = 30 + protocol = "HTTP" + unhealthy_threshold = 5 + matcher = "200-499" + timeout = 10 + grace_period_seconds = 300 + } db_ingress_security_groups = [] @@ -88,8 +95,7 @@ module "weblogic_eis" { platform_vars = var.platform_vars tags = var.tags - ignore_changes_service_task_definition = false - force_new_deployment = false + ignore_changes_service_task_definition = true providers = { aws.core-vpc = aws.core-vpc diff --git a/terraform/environments/delius-core/modules/delius_environment/weblogic_params.tf b/terraform/environments/delius-core/modules/delius_environment/weblogic_params.tf index 07a9d6f421c..53344c64792 100644 --- a/terraform/environments/delius-core/modules/delius_environment/weblogic_params.tf +++ b/terraform/environments/delius-core/modules/delius_environment/weblogic_params.tf @@ -135,13 +135,13 @@ locals { module "weblogic_ssm" { source = "../helpers/ssm_params" application_name = "weblogic" - environment_name = "delius-core-${var.env_name}" + environment_name = "${var.account_info.application_name}-${var.env_name}" params_plain = local.weblogic_ssm.vars params_secure = local.weblogic_ssm.secrets } data "aws_ssm_parameter" "weblogic_ssm" { for_each = toset(local.weblogic_ssm.vars) - name = "/delius-core-${var.env_name}/weblogic/${each.key}" + name = "/${var.account_info.application_name}-${var.env_name}/weblogic/${each.key}" } diff --git 
a/terraform/environments/delius-core/modules/helpers/delius_microservice/ecs.tf b/terraform/environments/delius-core/modules/helpers/delius_microservice/ecs.tf index f03b5554344..9fdfde284ab 100644 --- a/terraform/environments/delius-core/modules/helpers/delius_microservice/ecs.tf +++ b/terraform/environments/delius-core/modules/helpers/delius_microservice/ecs.tf @@ -1,5 +1,5 @@ module "container_definition" { - source = "git::https://github.com/ministryofjustice/modernisation-platform-terraform-ecs-cluster//container?ref=v4.3.0" + source = "git::https://github.com/ministryofjustice/modernisation-platform-terraform-ecs-cluster//container?ref=v5.0.0" name = var.name image = var.container_image memory = var.container_memory @@ -9,7 +9,7 @@ module "container_definition" { environment = local.calculated_container_vars_list - health_check = var.health_check + health_check = var.container_health_check secrets = local.calculated_container_secrets_list port_mappings = var.container_port_config @@ -35,7 +35,7 @@ module "ecs_policies" { } module "ecs_service" { - source = "git::https://github.com/ministryofjustice/modernisation-platform-terraform-ecs-cluster//service?ref=v4.3.0" + source = "git::https://github.com/ministryofjustice/modernisation-platform-terraform-ecs-cluster//service?ref=v5.0.0" container_definitions = nonsensitive(module.container_definition.json_encoded_list) cluster_arn = var.ecs_cluster_arn name = "${var.env_name}-${var.name}" @@ -43,6 +43,8 @@ module "ecs_service" { task_cpu = var.container_cpu task_memory = var.container_memory + pin_task_definition_revision = var.pin_task_definition_revision + desired_count = var.desired_count deployment_maximum_percent = var.deployment_maximum_percent deployment_minimum_healthy_percent = var.deployment_minimum_healthy_percent @@ -51,7 +53,7 @@ module "ecs_service" { task_role_arn = "arn:aws:iam::${var.account_info.id}:role/${module.ecs_policies.task_role.name}" task_exec_role_arn = "arn:aws:iam::${var.account_info.id}:role/${module.ecs_policies.task_exec_role.name}" - health_check_grace_period_seconds = var.health_check_grace_period_seconds + health_check_grace_period_seconds = var.alb_health_check.grace_period_seconds service_load_balancers = var.microservice_lb != null ? 
concat([{ target_group_arn = aws_lb_target_group.frontend[0].arn @@ -62,13 +64,11 @@ module "ecs_service" { efs_volumes = var.efs_volumes - security_groups = [aws_security_group.ecs_service.id] + security_groups = [aws_security_group.ecs_service.id, var.cluster_security_group_id] subnets = var.account_config.private_subnet_ids enable_execute_command = true - ignore_changes = var.ignore_changes_service_task_definition - tags = var.tags } diff --git a/terraform/environments/delius-core/modules/helpers/delius_microservice/load_balancing.tf b/terraform/environments/delius-core/modules/helpers/delius_microservice/load_balancing.tf index 7f46a8cd665..5b48d9eb624 100644 --- a/terraform/environments/delius-core/modules/helpers/delius_microservice/load_balancing.tf +++ b/terraform/environments/delius-core/modules/helpers/delius_microservice/load_balancing.tf @@ -28,13 +28,13 @@ resource "aws_lb_target_group" "frontend" { } health_check { - path = var.health_check_path - healthy_threshold = "5" - interval = var.health_check_interval - protocol = "HTTP" - unhealthy_threshold = "5" - matcher = "200-499" - timeout = "5" + path = var.alb_health_check.path + healthy_threshold = var.alb_health_check.healthy_threshold + interval = var.alb_health_check.interval + protocol = var.alb_health_check.protocol + unhealthy_threshold = var.alb_health_check.unhealthy_threshold + matcher = var.alb_health_check.matcher + timeout = var.alb_health_check.timeout } lifecycle { @@ -149,7 +149,7 @@ resource "aws_lb_listener" "services" { resource "aws_route53_record" "services_nlb_r53_record" { provider = aws.core-vpc - zone_id = var.account_config.route53_inner_zone_info.zone_id + zone_id = var.account_config.route53_inner_zone.zone_id name = "${var.name}.service.${var.env_name}" type = "A" alias { diff --git a/terraform/environments/delius-core/modules/helpers/delius_microservice/variables.tf b/terraform/environments/delius-core/modules/helpers/delius_microservice/variables.tf index 51c3cf601fa..93ac9c86fe9 100644 --- a/terraform/environments/delius-core/modules/helpers/delius_microservice/variables.tf +++ b/terraform/environments/delius-core/modules/helpers/delius_microservice/variables.tf @@ -217,12 +217,6 @@ variable "platform_vars" { }) } -variable "health_check_grace_period_seconds" { - description = "The amount of time, in seconds, that Amazon ECS waits before unhealthy instances are shut down." 
- type = number - default = 60 -} - variable "ecs_cluster_arn" { description = "The ARN of the ECS cluster" type = string @@ -376,18 +370,6 @@ variable "alb_security_group_id" { default = null } -variable "health_check_path" { - description = "The health check path for the alb target group" - type = string - default = "/" -} - -variable "health_check_interval" { - description = "The health check interval for the alb target group" - type = string - default = "300" -} - variable "alb_stickiness_enabled" { description = "Enable or disable stickiness" type = string @@ -496,12 +478,6 @@ variable "redeploy_on_apply" { default = false } -variable "force_new_deployment" { - description = "Force a new deployment" - type = bool - default = false -} - variable "ecs_service_ingress_security_group_ids" { description = "Security group ids to allow ingress to the ECS service" type = list(object({ @@ -587,7 +563,7 @@ variable "extra_task_exec_role_policies" { default = {} } -variable "health_check" { +variable "container_health_check" { description = "The health check configuration for the container" type = object({ command = list(string) @@ -599,6 +575,30 @@ variable "health_check" { default = null } +variable "alb_health_check" { + description = "The health check configuration for the ALB" + type = object({ + path = string + interval = number + timeout = number + healthy_threshold = number + unhealthy_threshold = number + matcher = string + protocol = string + grace_period_seconds = number + }) + default = { + path = "/" + interval = 30 + timeout = 5 + healthy_threshold = 5 + unhealthy_threshold = 5 + matcher = "200-499" + protocol = "HTTP" + grace_period_seconds = 120 + } +} + variable "nlb_ingress_security_group_ids" { description = "Security group ids to allow ingress to the ECS service" type = list(object({ @@ -628,3 +628,9 @@ variable "system_controls" { type = list(any) default = [] } + +variable "pin_task_definition_revision" { + type = number + description = "The revision of the task definition to use" + default = 0 +} diff --git a/terraform/environments/delius-jitbit/locals.tf b/terraform/environments/delius-jitbit/locals.tf index 561d1ffa9e0..e8e6e1cb010 100644 --- a/terraform/environments/delius-jitbit/locals.tf +++ b/terraform/environments/delius-jitbit/locals.tf @@ -41,7 +41,7 @@ locals { module.ip_addresses.moj_cidr.ark_dc_external_internet, module.ip_addresses.moj_cidr.vodafone_dia_networks, module.ip_addresses.moj_cidr.palo_alto_primsa_access_corporate, - module.ip_addresses.moj_cidr.digital_prisons, + module.ip_addresses.moj_cidr.mojo_azure_landing_zone_egress, [ # Route53 Healthcheck Access Cidrs # London Region not support yet, so metrics are not yet publised, can be enabled at later stage for Route53 endpoint monitor diff --git a/terraform/environments/delius-mis/locals_environments_all.tf b/terraform/environments/delius-mis/locals_environments_all.tf index ed82f49e100..fe53dee7e0c 100644 --- a/terraform/environments/delius-mis/locals_environments_all.tf +++ b/terraform/environments/delius-mis/locals_environments_all.tf @@ -18,7 +18,7 @@ locals { subnet_set = local.subnet_set data_subnet_ids = data.aws_subnets.shared-data.ids data_subnet_a_id = data.aws_subnet.data_subnets_a.id - route53_inner_zone_info = data.aws_route53_zone.inner + route53_inner_zone = data.aws_route53_zone.inner route53_network_services_zone = data.aws_route53_zone.network-services route53_external_zone = data.aws_route53_zone.external shared_vpc_id = data.aws_vpc.shared.id diff --git 
a/terraform/environments/delius-nextcloud/locals_environments_all.tf b/terraform/environments/delius-nextcloud/locals_environments_all.tf index 7960768e454..9063ac740bb 100644 --- a/terraform/environments/delius-nextcloud/locals_environments_all.tf +++ b/terraform/environments/delius-nextcloud/locals_environments_all.tf @@ -18,7 +18,7 @@ locals { subnet_set = local.subnet_set data_subnet_ids = data.aws_subnets.shared-data.ids data_subnet_a_id = data.aws_subnet.data_subnets_a.id - route53_inner_zone_info = data.aws_route53_zone.inner + route53_inner_zone = data.aws_route53_zone.inner route53_network_services_zone = data.aws_route53_zone.network-services route53_external_zone = data.aws_route53_zone.external shared_vpc_id = data.aws_vpc.shared.id diff --git a/terraform/environments/digital-prison-reporting/application_variables.json b/terraform/environments/digital-prison-reporting/application_variables.json index 5ef69277647..add4e97b9c6 100644 --- a/terraform/environments/digital-prison-reporting/application_variables.json +++ b/terraform/environments/digital-prison-reporting/application_variables.json @@ -94,10 +94,7 @@ "setup_sonatype_secrets": true, "setup_scheduled_action_iam_role": true, "setup_redshift_schedule": true, - "dps_domains": [ - "dps-activities", - "dps-case-notes" - ], + "dps_domains": ["dps-activities", "dps-case-notes", "dps-basm"], "alarms": { "setup_cw_alarms": true, "redshift": { @@ -176,7 +173,8 @@ "enable_dbt_k8s_secrets": true, "dpr_generic_athena_workgroup": true, "analytics_generic_athena_workgroup": true, - "redshift_table_expiry_seconds": "604800" + "redshift_table_expiry_seconds": "604800", + "enable_s3_data_migrate_lambda": true }, "test": { "project_short_id": "dpr", @@ -272,10 +270,7 @@ "setup_sonatype_secrets": false, "setup_scheduled_action_iam_role": true, "setup_redshift_schedule": true, - "dps_domains": [ - "dps-activities", - "dps-case-notes" - ], + "dps_domains": ["dps-activities", "dps-case-notes", "dps-basm"], "alarms": { "setup_cw_alarms": true, "redshift": { @@ -354,7 +349,8 @@ "enable_dbt_k8s_secrets": true, "dpr_generic_athena_workgroup": true, "analytics_generic_athena_workgroup": true, - "redshift_table_expiry_seconds": "604800" + "redshift_table_expiry_seconds": "604800", + "enable_s3_data_migrate_lambda": true }, "preproduction": { "project_short_id": "dpr", @@ -452,10 +448,7 @@ "setup_scheduled_action_iam_role": true, "setup_redshift_schedule": true, "enable_redshift_health_check": true, - "dps_domains": [ - "dps-activities", - "dps-case-notes" - ], + "dps_domains": ["dps-activities", "dps-case-notes", "dps-basm"], "alarms": { "setup_cw_alarms": true, "redshift": { @@ -552,7 +545,8 @@ ] } ], - "redshift_table_expiry_seconds": "604800" + "redshift_table_expiry_seconds": "604800", + "enable_s3_data_migrate_lambda": true }, "production": { "project_short_id": "dpr", @@ -648,10 +642,7 @@ "setup_sonatype_secrets": false, "setup_scheduled_action_iam_role": false, "setup_redshift_schedule": false, - "dps_domains": [ - "dps-activities", - "dps-case-notes" - ], + "dps_domains": ["dps-activities", "dps-case-notes", "dps-basm"], "alarms": { "setup_cw_alarms": true, "redshift": { @@ -745,7 +736,8 @@ ] } ], - "redshift_table_expiry_seconds": "86400" + "redshift_table_expiry_seconds": "86400", + "enable_s3_data_migrate_lambda": false } } } diff --git a/terraform/environments/digital-prison-reporting/data_product_definitions.tf b/terraform/environments/digital-prison-reporting/data_product_definitions.tf new file mode 100644 index 
00000000000..f80ef2204ae --- /dev/null +++ b/terraform/environments/digital-prison-reporting/data_product_definitions.tf @@ -0,0 +1,132 @@ +locals { + table_name = "${local.project}-data-product-definition" +} + +module "dynamo_table_dpd" { + source = "./modules/dynamo_tables" + create_table = true + autoscaling_enabled = false + name = local.table_name + + hash_key = "data-product-id" + range_key = "category" + table_class = "STANDARD" + ttl_enabled = false + + attributes = [ + { + name = "data-product-id" + type = "S" + }, + { + name = "category" + type = "S" + } + ] + + global_secondary_indexes = [ + { + name = "category-index" + hash_key = "category" + projection_type = "ALL" + } + ] + + tags = merge( + local.all_tags, + { + Name = local.table_name + Resource_Type = "Dynamo Table" + } + ) +} + +// Allow GitHub Actions run from the definitions repo to deploy/undeploy DPDs in the DynamoDB table. + +data "aws_iam_policy_document" "dpd_table_github_deploy_assume_role_policy" { + statement { + effect = "Allow" + actions = ["sts:AssumeRoleWithWebIdentity"] + + principals { + type = "Federated" + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:oidc-provider/token.actions.githubusercontent.com"] + } + condition { + test = "StringEquals" + values = ["sts.amazonaws.com"] + variable = "token.actions.githubusercontent.com:aud" + } + condition { + test = "StringLike" + values = ["repo:ministryofjustice/hmpps-dpr-data-product-definitions:*"] + variable = "token.actions.githubusercontent.com:sub" + } + } +} + +data "aws_iam_policy_document" "dpd_table_github_deploy_put_policy" { + statement { + sid = "DeployDpdItems" + effect = "Allow" + actions = [ + "dynamodb:PutItem", + "dynamodb:DescribeTable", + "dynamodb:DeleteItem", + "dynamodb:GetItem", + "dynamodb:Scan", + "dynamodb:Query", + "dynamodb:UpdateItem" + ] + resources = [ + module.dynamo_table_dpd.dynamodb_table_arn + ] + } +} + +resource "aws_iam_policy" "dpd_table_github_deploy_put_policy" { + name = "${local.project}-dpd-table-github-deploy-put-policy" + description = "Allows deploying DPDs to the DPD DDB table" + policy = data.aws_iam_policy_document.dpd_table_github_deploy_put_policy.json +} + +resource "aws_iam_role" "dpd_table_github_deploy_role" { + name = "${local.project}-dpd-table-github-deploy-role" + assume_role_policy = data.aws_iam_policy_document.dpd_table_github_deploy_assume_role_policy.json + tags = local.tags +} + +resource "aws_iam_role_policy_attachment" "dpd_table_github_deploy_put_policy" { + policy_arn = aws_iam_policy.dpd_table_github_deploy_put_policy.arn + role = aws_iam_role.dpd_table_github_deploy_role.name +} + +// Allow the Main API to read the DPD table. 
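# For illustration only, not part of this diff: a definition item in the table
# above is keyed by the "data-product-id" hash key and "category" range key.
# A hedged sketch of the item shape follows; the "dynamodb_table_name" output
# name and the "definition" attribute are assumptions, and real items are
# written by the GitHub Actions workflow in hmpps-dpr-data-product-definitions
# rather than by Terraform. The read-only access for the Main API continues
# after this sketch.

resource "aws_dynamodb_table_item" "example_dpd" {
  table_name = module.dynamo_table_dpd.dynamodb_table_name # assumed output name
  hash_key   = "data-product-id"
  range_key  = "category"

  # Hypothetical item: only the two key attributes are taken from the table
  # definition above; everything else is illustrative.
  item = jsonencode({
    "data-product-id" = { S = "example-data-product" }
    "category"        = { S = "report" }
    "definition"      = { S = "{\"id\": \"example-data-product\"}" }
  })
}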
+ +data "aws_iam_policy_document" "dpd_table_read_policy" { + statement { + sid = "DeployDpdItems" + effect = "Allow" + actions = [ + "dynamodb:DescribeTable", + "dynamodb:GetItem", + "dynamodb:Scan", + "dynamodb:Query", + ] + resources = [ + module.dynamo_table_dpd.dynamodb_table_arn + ] + } +} + +resource "aws_iam_policy" "dpd_table_read_policy" { + name = "${local.project}-dpd-table-read-policy" + description = "Allows reading DPDs from the DPD DDB table" + policy = data.aws_iam_policy_document.dpd_table_read_policy.json +} + +// Attach DDB read policy to existing cross account role +resource "aws_iam_role_policy_attachment" "dpd_table_read_policy" { + policy_arn = aws_iam_policy.dpd_table_read_policy.arn + role = aws_iam_role.dataapi_cross_role.name +} diff --git a/terraform/environments/digital-prison-reporting/dpr_aws_s3_data_lifecycle_migrate.tf b/terraform/environments/digital-prison-reporting/dpr_aws_s3_data_lifecycle_migrate.tf new file mode 100644 index 00000000000..29ce32d7ef9 --- /dev/null +++ b/terraform/environments/digital-prison-reporting/dpr_aws_s3_data_lifecycle_migrate.tf @@ -0,0 +1,38 @@ +# Domain Builder Backend Lambda function +module "aws_s3_data_migrate" { + source = "./modules/lambdas/generic" + + enable_lambda = local.enable_s3_data_migrate_lambda + name = local.lambda_s3_data_migrate_name + s3_bucket = local.lambda_s3_data_migrate_code_s3_bucket + s3_key = local.lambda_s3_data_migrate_code_s3_key + handler = local.lambda_s3_data_migrate_handler + runtime = local.lambda_s3_data_migrate_runtime + policies = local.lambda_s3_data_migrate_policies + tracing = local.lambda_s3_data_migrate_tracing + + log_retention_in_days = local.lambda_log_retention_in_days + + # Set timeout to the maximum of 900 seconds (15 minutes) + timeout = 900 + + # Optional: Adjust memory size if needed + memory_size = 2048 + + vpc_settings = { + subnet_ids = [data.aws_subnet.data_subnets_a.id, data.aws_subnet.data_subnets_b.id, data.aws_subnet.data_subnets_c.id] + security_group_ids = [aws_security_group.lambda_generic[0].id, ] + } + + tags = merge( + local.all_tags, + { + Resource_Group = "dpr-operations" + Jira = "DPR2-1368" + Resource_Type = "lambda" + Name = local.lambda_s3_data_migrate_name + } + ) + + depends_on = [aws_iam_policy.s3_read_access_policy, aws_iam_policy.s3_read_write_policy, aws_iam_policy.kms_read_access_policy] +} \ No newline at end of file diff --git a/terraform/environments/digital-prison-reporting/glue-connections.tf b/terraform/environments/digital-prison-reporting/glue-connections.tf index 0ba8d5028c6..71ab036c02b 100644 --- a/terraform/environments/digital-prison-reporting/glue-connections.tf +++ b/terraform/environments/digital-prison-reporting/glue-connections.tf @@ -79,6 +79,7 @@ resource "aws_glue_connection" "glue_dps_connection" { resource "aws_security_group" "glue_job_connection_sg" { #checkov:skip=CKV2_AWS_5 + #checkov:skip=CKV_AWS_382: "Ensure no security groups allow egress from 0.0.0.0:0 to port -1" name = "${local.project}-glue-connection_sg" description = "Security group for glue jobs when using Glue Connections" vpc_id = data.aws_vpc.shared.id diff --git a/terraform/environments/digital-prison-reporting/locals.tf b/terraform/environments/digital-prison-reporting/locals.tf index fb54482bba6..1113141203b 100644 --- a/terraform/environments/digital-prison-reporting/locals.tf +++ b/terraform/environments/digital-prison-reporting/locals.tf @@ -403,19 +403,15 @@ locals { environment_configuration = local.environment_configurations[local.environment] 
environment_configurations = { development = { - observability_platform_account_id = local.environment_management.account_ids["observability-platform-development"] analytical_platform_runner_suffix = "-dev" } test = { - observability_platform_account_id = local.environment_management.account_ids["observability-platform-development"] analytical_platform_runner_suffix = "-test" } preproduction = { - observability_platform_account_id = local.environment_management.account_ids["observability-platform-development"] analytical_platform_runner_suffix = "-pp" } production = { - observability_platform_account_id = local.environment_management.account_ids["observability-platform-production"] analytical_platform_runner_suffix = "" } } @@ -427,4 +423,19 @@ locals { Name = local.application_name } ) + + # DPR Operations, + # S3 Data Migration Lambda + enable_s3_data_migrate_lambda = local.application_data.accounts[local.environment].enable_s3_data_migrate_lambda + lambda_s3_data_migrate_name = "${local.project}-s3-data-lifecycle-migration-lambda" + lambda_s3_data_migrate_code_s3_bucket = module.s3_artifacts_store.bucket_id + lambda_s3_data_migrate_code_s3_key = "build-artifacts/dpr-operations/py_files/dpr-s3-data-lifecycle-migration-lambda-v1.zip" + lambda_s3_data_migrate_handler = "dpr-s3-data-lifecycle-migration-lambda-v1.lambda_handler" + lambda_s3_data_migrate_runtime = "python3.11" + lambda_s3_data_migrate_tracing = "PassThrough" + lambda_s3_data_migrate_policies = [ + "arn:aws:iam::${local.account_id}:policy/${local.s3_read_access_policy}", + "arn:aws:iam::${local.account_id}:policy/${local.kms_read_access_policy}", + "arn:aws:iam::${local.account_id}:policy/${local.s3_read_write_policy}" + ] } diff --git a/terraform/environments/digital-prison-reporting/main.tf b/terraform/environments/digital-prison-reporting/main.tf index 2b6d6b3d95c..2007d32f1ab 100644 --- a/terraform/environments/digital-prison-reporting/main.tf +++ b/terraform/environments/digital-prison-reporting/main.tf @@ -850,12 +850,13 @@ module "s3_structured_bucket" { # S3 Curated module "s3_curated_bucket" { - source = "./modules/s3_bucket" - create_s3 = local.setup_buckets - name = "${local.project}-curated-zone-${local.env}" - custom_kms_key = local.s3_kms_arn - create_notification_queue = false # For SQS Queue - enable_lifecycle = true + source = "./modules/s3_bucket" + create_s3 = local.setup_buckets + name = "${local.project}-curated-zone-${local.env}" + custom_kms_key = local.s3_kms_arn + create_notification_queue = false # For SQS Queue + enable_lifecycle = true + enable_intelligent_tiering = false tags = merge( local.all_tags, @@ -866,7 +867,7 @@ module "s3_curated_bucket" { ) } -# S3 Curated +# S3 Temp Reload module "s3_temp_reload_bucket" { source = "./modules/s3_bucket" create_s3 = local.setup_buckets @@ -1230,7 +1231,7 @@ module "dms_nomis_ingestor" { dms_target_name = "kinesis" short_name = "nomis" migration_type = "full-load-and-cdc" - replication_instance_version = "3.4.7" # Upgrade + replication_instance_version = "3.5.2" replication_instance_class = "dms.t3.medium" subnet_ids = [ data.aws_subnet.data_subnets_a.id, data.aws_subnet.data_subnets_b.id, data.aws_subnet.data_subnets_c.id @@ -1248,10 +1249,6 @@ module "dms_nomis_ingestor" { "kinesis_target_stream" = "arn:aws:kinesis:eu-west-2:${data.aws_caller_identity.current.account_id}:stream/${local.kinesis_stream_ingestor}" } - availability_zones = { - 0 = "eu-west-2a" - } - tags = merge( local.all_tags, { @@ -1262,56 +1259,6 @@ module "dms_nomis_ingestor" { ) } 
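# For illustration only, not part of this diff: the dms_dps module used by the
# ingestor above wraps the DMS endpoints, replication instance and replication
# task. A hedged sketch of the kind of replication task that the
# "full-load-and-cdc" migration_type drives is below; the endpoint and instance
# references and the OMS_OWNER selection rule are assumptions about the module
# internals, not taken from this repository.

resource "aws_dms_replication_task" "nomis_ingestor_sketch" {
  replication_task_id      = "dpr-dms-nomis-ingestor-sketch"
  migration_type           = "full-load-and-cdc"
  replication_instance_arn = aws_dms_replication_instance.example.replication_instance_arn # assumed
  source_endpoint_arn      = aws_dms_endpoint.nomis_source_example.endpoint_arn            # assumed
  target_endpoint_arn      = aws_dms_endpoint.kinesis_target_example.endpoint_arn          # assumed

  # Minimal table mapping: replicate every table in an assumed source schema.
  table_mappings = jsonencode({
    "rules" = [
      {
        "rule-type" = "selection"
        "rule-id"   = "1"
        "rule-name" = "include-source-schema"
        "object-locator" = {
          "schema-name" = "OMS_OWNER"
          "table-name"  = "%"
        }
        "rule-action" = "include"
      }
    ]
  })
}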
-module "dms_fake_data_ingestor" { - source = "./modules/dms_dps" - setup_dms_instance = local.setup_fake_data_dms_instance - enable_replication_task = local.enable_fake_data_replication_task # Disable Replication Task - name = "${local.project}-dms-fake-data-ingestor-${local.env}" - vpc_cidr = [data.aws_vpc.shared.cidr_block] - source_engine_name = "postgres" - source_db_name = "db59b5cf9e5de6b794" - source_app_username = "cp9Zr5bLim" - source_app_password = "whkthrI65zpcFEe5" - source_address = "cloud-platform-59b5cf9e5de6b794.cdwm328dlye6.eu-west-2.rds.amazonaws.com" - source_db_port = 5432 - vpc = data.aws_vpc.shared.id - kinesis_stream_policy = module.kinesis_stream_ingestor.kinesis_stream_iam_policy_admin_arn - project_id = local.project - env = local.environment - dms_source_name = "postgres" - dms_target_name = "kinesis" - short_name = "fake-data" - migration_type = "full-load-and-cdc" - replication_instance_version = "3.4.7" # Rollback - replication_instance_class = "dms.t3.medium" - subnet_ids = [ - data.aws_subnet.data_subnets_a.id, data.aws_subnet.data_subnets_b.id, data.aws_subnet.data_subnets_c.id - ] - - vpc_role_dependency = [aws_iam_role.dmsvpcrole] - cloudwatch_role_dependency = [aws_iam_role.dms_cloudwatch_logs_role] - - kinesis_settings = { - "include_null_and_empty" = "true" - "partition_include_schema_table" = "true" - "include_partition_value" = "true" - "kinesis_target_stream" = "arn:aws:kinesis:eu-west-2:${data.aws_caller_identity.current.account_id}:stream/${local.kinesis_stream_ingestor}" - } - - availability_zones = { - 0 = "eu-west-2a" - } - - tags = merge( - local.all_tags, - { - Name = "${local.project}-dms-fake-data-ingestor-${local.env}" - Resource_Type = "DMS Replication" - Postgres_Source = "DPS" - } - ) -} - # DMS Nomis Data Collector module "dms_nomis_to_s3_ingestor" { source = "./modules/dms" @@ -1349,10 +1296,6 @@ module "dms_nomis_to_s3_ingestor" { bucket_name = module.s3_raw_bucket.bucket_id - availability_zones = { - 0 = "eu-west-2a" - } - depends_on = [ module.s3_raw_bucket.bucket_id ] diff --git a/terraform/environments/digital-prison-reporting/modules/dms/main.tf b/terraform/environments/digital-prison-reporting/modules/dms/main.tf index 504ad6b9441..c5c313de978 100644 --- a/terraform/environments/digital-prison-reporting/modules/dms/main.tf +++ b/terraform/environments/digital-prison-reporting/modules/dms/main.tf @@ -104,7 +104,6 @@ resource "aws_dms_s3_endpoint" "dms-s3-target-endpoint" { max_file_size = 120000 cdc_max_batch_interval = 10 - cdc_inserts_and_updates = true depends_on = [aws_iam_policy.dms-s3-target-policy, aws_iam_policy.dms-operator-s3-policy] @@ -126,6 +125,7 @@ resource "aws_dms_replication_subnet_group" "dms-s3-target-subnet-group" { resource "aws_security_group" "dms_s3_target_sec_group" { #checkov:skip=CKV2_AWS_5 #checkov:skip=CKV_AWS_23: "Ensure every security group and rule has a description" + #checkov:skip=CKV_AWS_382: "Ensure no security groups allow egress from 0.0.0.0:0 to port -1" count = var.setup_dms_instance ? 1 : 0 diff --git a/terraform/environments/digital-prison-reporting/modules/dms/variables.tf b/terraform/environments/digital-prison-reporting/modules/dms/variables.tf index ce82700e249..d482c2b5da1 100644 --- a/terraform/environments/digital-prison-reporting/modules/dms/variables.tf +++ b/terraform/environments/digital-prison-reporting/modules/dms/variables.tf @@ -1,4 +1,5 @@ variable "name" { + type = string description = "DMS Replication name." 
} @@ -56,14 +57,6 @@ variable "migration_type" { description = "DMS Migration Type" } -variable "availability_zones" { - default = [ - { - 0 = "eu-west-2a" - } - ] -} - variable "rename_rule_source_schema" { description = "The source schema we will rename to a target output 'space'" type = string @@ -81,19 +74,26 @@ variable "subnet_ids" { default = [] } -variable "source_address" {} +variable "source_address" { + type = string +} -variable "vpc" {} +variable "vpc" { + type = string +} variable "availability_zone" { + type = string default = null } variable "create" { + type = bool default = true } variable "create_iam_roles" { + type = bool default = true } @@ -106,11 +106,13 @@ variable "iam_role_permissions_boundary" { # Used in tagginga and naming the resources variable "stack_name" { + type = string description = "The name of our application" default = "dblink" } variable "owner" { + type = string description = "A group email address to be used in tags" default = "autobots@ga.gov.au" } @@ -120,6 +122,7 @@ variable "owner" { #-------------------------------------------------------------- variable "identifier" { + type = string default = "rds" description = "Name of the database in the RDS" } @@ -129,51 +132,42 @@ variable "identifier" { #-------------------------------------------------------------- variable "target_backup_retention_period" { + type = string # Days default = "30" description = "Retention of RDS backups" } variable "target_backup_window" { + type = string default = "14:00-17:00" description = "RDS backup window" } variable "target_db_port" { + type = number description = "The port the Application Server will access the database on" default = 5432 } variable "target_engine_version" { + type = string description = "Engine version" default = "9.3.14" } variable "target_instance_class" { + type = string default = "db.t2.micro" description = "Instance class" } variable "target_maintenance_window" { + type = string default = "Mon:00:00-Mon:03:00" description = "RDS maintenance window" } -variable "target_rds_is_multi_az" { - description = "Create backup database in separate availability zone" - default = "false" -} - -variable "target_storage" { - default = "10" - description = "Storage size in GB" -} - -variable "target_storage_encrypted" { - description = "Encrypt storage or leave unencrypted" - default = false -} - #variable "target_username" { # description = "Username to access the target database" #} @@ -183,81 +177,78 @@ variable "target_storage_encrypted" { #-------------------------------------------------------------- variable "source_app_password" { + type = string description = "Password for the endpoint to access the source database" } variable "source_app_username" { + type = string description = "Username for the endpoint to access the source database" } -variable "source_backup_retention_period" { - # Days - default = "1" - description = "Retention of RDS backups" -} - variable "source_backup_window" { + type = string # 12:00AM-03:00AM AEST default = "14:00-17:00" description = "RDS backup window" } variable "source_db_name" { + type = string description = "Name of the target database" default = "oracle" } variable "source_db_port" { + type = number description = "The port the Application Server will access the database on" default = null } variable "source_engine" { + type = string default = "oracle-se2" description = "Engine type, example values mysql, postgres" } variable "source_engine_name" { + type = string default = "" description = "Engine name for 
DMS" } variable "source_engine_version" { + type = string description = "Engine version" default = "12.1.0.2.v8" } variable "source_instance_class" { + type = string default = "db.t2.micro" description = "Instance class" } variable "source_maintenance_window" { + type = string default = "Mon:00:00-Mon:03:00" description = "RDS maintenance window" } variable "source_password" { + type = string description = "Password of the source database" default = "" } -variable "source_rds_is_multi_az" { - description = "Create backup database in separate availability zone" - default = "false" -} - -variable "source_storage" { - default = "10" - description = "Storage size in GB" -} - variable "source_storage_encrypted" { + type = bool description = "Encrypt storage or leave unencrypted" default = false } variable "source_username" { + type = string description = "Username to access the source database" default = "" } @@ -267,21 +258,25 @@ variable "source_username" { #-------------------------------------------------------------- variable "replication_instance_maintenance_window" { + type = string description = "Maintenance window for the replication instance" default = "sun:10:30-sun:14:30" } variable "replication_instance_storage" { + type = number description = "Size of the replication instance in GB" - default = "10" + default = 10 } variable "replication_instance_version" { + type = string description = "Engine version of the replication instance" default = "3.4.6" } variable "replication_instance_class" { + type = string description = "Instance class of replication instance" default = "dms.t2.micro" } @@ -297,6 +292,7 @@ variable "allow_major_version_upgrade" { #-------------------------------------------------------------- variable "database_subnet_cidr" { + type = list(string) default = ["10.26.25.208/28", "10.26.25.224/28", "10.26.25.240/28"] description = "List of subnets to be used for databases" } diff --git a/terraform/environments/digital-prison-reporting/modules/dms/versions.tf b/terraform/environments/digital-prison-reporting/modules/dms/versions.tf new file mode 100644 index 00000000000..bf68a137672 --- /dev/null +++ b/terraform/environments/digital-prison-reporting/modules/dms/versions.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + aws = { + version = "~> 5.0" + source = "hashicorp/aws" + } + + template = { + source = "hashicorp/template" + version = "~> 2.2" + } + + } + required_version = "~> 1.0" +} diff --git a/terraform/environments/digital-prison-reporting/modules/dms_dps/main.tf b/terraform/environments/digital-prison-reporting/modules/dms_dps/main.tf index e003d334d96..f4061811053 100644 --- a/terraform/environments/digital-prison-reporting/modules/dms_dps/main.tf +++ b/terraform/environments/digital-prison-reporting/modules/dms_dps/main.tf @@ -135,6 +135,7 @@ resource "aws_dms_replication_subnet_group" "dms" { resource "aws_security_group" "dms_sec_group" { #checkov:skip=CKV_AWS_23: "Ensure every security group and rule has a description" + #checkov:skip=CKV_AWS_382: "Ensure no security groups allow egress from 0.0.0.0:0 to port -1" count = var.setup_dms_instance ? 
1 : 0 name = "${var.project_id}-dms-${var.short_name}-${var.dms_source_name}-${var.dms_target_name}-security-group" diff --git a/terraform/environments/digital-prison-reporting/modules/dms_dps/variables.tf b/terraform/environments/digital-prison-reporting/modules/dms_dps/variables.tf index f3939ab9acb..cb7a96375bf 100644 --- a/terraform/environments/digital-prison-reporting/modules/dms_dps/variables.tf +++ b/terraform/environments/digital-prison-reporting/modules/dms_dps/variables.tf @@ -1,4 +1,5 @@ variable "name" { + type = string description = "DMS Replication name." } @@ -52,38 +53,36 @@ variable "migration_type" { description = "DMS Migration Type" } -variable "availability_zones" { - default = [ - { - 0 = "eu-west-2a" - 1 = "eu-west-2b" - 2 = "eu-west-2c" - } - ] -} - - variable "subnet_ids" { description = "An List of VPC subnet IDs to use in the subnet group" type = list(string) default = [] } -variable "source_address" {} +variable "source_address" { + type = string +} -variable "vpc" {} +variable "vpc" { + type = string +} -variable "kinesis_stream_policy" {} +variable "kinesis_stream_policy" { + type = string +} variable "availability_zone" { + type = string default = null } variable "create" { + type = bool default = true } variable "create_iam_roles" { + type = bool default = true } @@ -96,11 +95,13 @@ variable "iam_role_permissions_boundary" { # Used in tagginga and naming the resources variable "stack_name" { + type = string description = "The name of our application" default = "dblink" } variable "owner" { + type = string description = "A group email address to be used in tags" default = "autobots@ga.gov.au" } @@ -110,6 +111,7 @@ variable "owner" { #-------------------------------------------------------------- variable "identifier" { + type = string default = "rds" description = "Name of the database in the RDS" } @@ -118,13 +120,8 @@ variable "identifier" { # DMS target config #-------------------------------------------------------------- -variable "target_backup_retention_period" { - # Days - default = "30" - description = "Retention of RDS backups" -} - variable "target_backup_window" { + type = string # 12:00AM-03:00AM AEST default = "14:00-17:00" description = "RDS backup window" @@ -135,26 +132,31 @@ variable "target_backup_window" { #} variable "target_db_port" { + type = number description = "The port the Application Server will access the database on" default = 5432 } variable "target_engine" { + type = string default = "kinesis" description = "Engine type, example values mysql, postgres" } variable "target_engine_version" { + type = string description = "Engine version" default = "9.3.14" } variable "target_instance_class" { + type = string default = "db.t2.micro" description = "Instance class" } variable "target_maintenance_window" { + type = string default = "Mon:00:00-Mon:03:00" description = "RDS maintenance window" } @@ -163,21 +165,6 @@ variable "target_maintenance_window" { # description = "Password of the target database" #} -variable "target_rds_is_multi_az" { - description = "Create backup database in separate availability zone" - default = "false" -} - -variable "target_storage" { - default = "10" - description = "Storage size in GB" -} - -variable "target_storage_encrypted" { - description = "Encrypt storage or leave unencrypted" - default = false -} - #variable "target_username" { # description = "Username to access the target database" #} @@ -192,81 +179,78 @@ variable "kinesis_settings" { 
#-------------------------------------------------------------- variable "source_app_password" { + type = string description = "Password for the endpoint to access the source database" } variable "source_app_username" { + type = string description = "Username for the endpoint to access the source database" } -variable "source_backup_retention_period" { - # Days - default = "1" - description = "Retention of RDS backups" -} - variable "source_backup_window" { + type = string # 12:00AM-03:00AM AEST default = "14:00-17:00" description = "RDS backup window" } variable "source_db_name" { + type = string description = "Name of the target database" default = "oracle" } variable "source_db_port" { + type = number description = "The port the Application Server will access the database on" default = null } variable "source_engine" { + type = string default = "oracle-se2" description = "Engine type, example values mysql, postgres" } variable "source_engine_name" { + type = string default = "" description = "Engine name for DMS" } variable "source_engine_version" { + type = string description = "Engine version" default = "12.1.0.2.v8" } variable "source_instance_class" { + type = string default = "db.t2.micro" description = "Instance class" } variable "source_maintenance_window" { + type = string default = "Mon:00:00-Mon:03:00" description = "RDS maintenance window" } variable "source_password" { + type = string description = "Password of the source database" default = "" } -variable "source_rds_is_multi_az" { - description = "Create backup database in separate availability zone" - default = "false" -} - -variable "source_storage" { - default = "10" - description = "Storage size in GB" -} - variable "source_storage_encrypted" { + type = bool description = "Encrypt storage or leave unencrypted" default = false } variable "source_username" { + type = string description = "Username to access the source database" default = "" } @@ -276,21 +260,25 @@ variable "source_username" { #-------------------------------------------------------------- variable "replication_instance_maintenance_window" { + type = string description = "Maintenance window for the replication instance" default = "sun:10:30-sun:14:30" } variable "replication_instance_storage" { + type = number description = "Size of the replication instance in GB" - default = "10" + default = 10 } variable "replication_instance_version" { + type = string description = "Engine version of the replication instance" default = "3.4.6" } variable "replication_instance_class" { + type = string description = "Instance class of replication instance" default = "dms.t2.micro" } @@ -300,6 +288,7 @@ variable "replication_instance_class" { #-------------------------------------------------------------- variable "database_subnet_cidr" { + type = list(string) default = ["10.26.25.208/28", "10.26.25.224/28", "10.26.25.240/28"] description = "List of subnets to be used for databases" } diff --git a/terraform/environments/digital-prison-reporting/modules/dms_dps/versions.tf b/terraform/environments/digital-prison-reporting/modules/dms_dps/versions.tf new file mode 100644 index 00000000000..bf68a137672 --- /dev/null +++ b/terraform/environments/digital-prison-reporting/modules/dms_dps/versions.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + aws = { + version = "~> 5.0" + source = "hashicorp/aws" + } + + template = { + source = "hashicorp/template" + version = "~> 2.2" + } + + } + required_version = "~> 1.0" +} diff --git 
a/terraform/environments/digital-prison-reporting/modules/dms_s3_v2/main.tf b/terraform/environments/digital-prison-reporting/modules/dms_s3_v2/main.tf index 7b1415d737a..0da948ae093 100644 --- a/terraform/environments/digital-prison-reporting/modules/dms_s3_v2/main.tf +++ b/terraform/environments/digital-prison-reporting/modules/dms_s3_v2/main.tf @@ -134,9 +134,15 @@ resource "aws_dms_endpoint" "dms-s3-target-source" { ssl_mode = var.source_ssl_mode username = var.source_app_username - postgres_settings { - map_boolean_as_boolean = true - heartbeat_enable = true + dynamic "postgres_settings" { + for_each = var.source_engine_name == "postgres" ? [1] : [] + + content { + map_boolean_as_boolean = true + fail_tasks_on_lob_truncation = true + heartbeat_enable = true + heartbeat_frequency = 5 + } } extra_connection_attributes = var.extra_attributes @@ -163,7 +169,6 @@ resource "aws_dms_s3_endpoint" "dms-s3-target-endpoint" { max_file_size = 120000 cdc_max_batch_interval = 10 - cdc_inserts_and_updates = true tags = merge( var.tags, diff --git a/terraform/environments/digital-prison-reporting/modules/dms_s3_v2/variables.tf b/terraform/environments/digital-prison-reporting/modules/dms_s3_v2/variables.tf index 346d5f716d3..a27b4da5ffe 100644 --- a/terraform/environments/digital-prison-reporting/modules/dms_s3_v2/variables.tf +++ b/terraform/environments/digital-prison-reporting/modules/dms_s3_v2/variables.tf @@ -291,7 +291,7 @@ variable "source_engine" { variable "source_engine_name" { default = "" type = string - description = "Engine name for DMS" + description = "Type of engine for the source endpoint. Example valid values are postgres, oracle" } diff --git a/terraform/environments/digital-prison-reporting/modules/dms_s3_v2/versions.tf b/terraform/environments/digital-prison-reporting/modules/dms_s3_v2/versions.tf index d2163a87985..bf68a137672 100644 --- a/terraform/environments/digital-prison-reporting/modules/dms_s3_v2/versions.tf +++ b/terraform/environments/digital-prison-reporting/modules/dms_s3_v2/versions.tf @@ -6,7 +6,7 @@ terraform { } template = { - source = "hashicorp/template" + source = "hashicorp/template" version = "~> 2.2" } diff --git a/terraform/environments/digital-prison-reporting/modules/domains/dms-endpoints/variables.tf b/terraform/environments/digital-prison-reporting/modules/domains/dms-endpoints/variables.tf index aa7f9023442..a2dc57b6c98 100644 --- a/terraform/environments/digital-prison-reporting/modules/domains/dms-endpoints/variables.tf +++ b/terraform/environments/digital-prison-reporting/modules/domains/dms-endpoints/variables.tf @@ -117,7 +117,7 @@ variable "identifier" { #-------------------------------------------------------------- variable "target_backup_retention_period" { - type = string + type = string # Days default = "30" description = "Retention of RDS backups" diff --git a/terraform/environments/digital-prison-reporting/modules/domains/maintenance-pipeline/pipeline.tf b/terraform/environments/digital-prison-reporting/modules/domains/maintenance-pipeline/pipeline.tf index 4281d701e6f..e894ac6482b 100644 --- a/terraform/environments/digital-prison-reporting/modules/domains/maintenance-pipeline/pipeline.tf +++ b/terraform/environments/digital-prison-reporting/modules/domains/maintenance-pipeline/pipeline.tf @@ -26,11 +26,12 @@ module "maintenance_pipeline" { }, "Check All Pending Files Have Been Processed" : { "Type" : "Task", - "Resource" : "arn:aws:states:::glue:startJobRun", + "Resource" : "arn:aws:states:::glue:startJobRun.sync", "Parameters" : { 
"JobName" : var.glue_unprocessed_raw_files_check_job, "Arguments" : { - "--dpr.orchestration.wait.interval.seconds" : "60", + "--dpr.orchestration.wait.interval.seconds" : tostring(var.processed_files_check_wait_interval_seconds), + "--dpr.orchestration.max.attempts" : tostring(var.processed_files_check_max_attempts), "--dpr.datastorage.retry.maxAttempts" : tostring(var.glue_s3_max_attempts), "--dpr.datastorage.retry.minWaitMillis" : tostring(var.glue_s3_retry_min_wait_millis), "--dpr.datastorage.retry.maxWaitMillis" : tostring(var.glue_s3_retry_max_wait_millis) diff --git a/terraform/environments/digital-prison-reporting/modules/domains/maintenance-pipeline/variables.tf b/terraform/environments/digital-prison-reporting/modules/domains/maintenance-pipeline/variables.tf index a16130e7ee9..02ac21df6d3 100644 --- a/terraform/environments/digital-prison-reporting/modules/domains/maintenance-pipeline/variables.tf +++ b/terraform/environments/digital-prison-reporting/modules/domains/maintenance-pipeline/variables.tf @@ -157,6 +157,16 @@ variable "retention_curated_num_workers" { } } +variable "processed_files_check_wait_interval_seconds" { + description = "Amount of seconds between checks to s3 if all files have been processed" + type = number +} + +variable "processed_files_check_max_attempts" { + description = "Maximum number of attempts to check if all files have been processed" + type = number +} + variable "glue_s3_max_attempts" { description = "The maximum number of attempts when making requests to S3" type = number diff --git a/terraform/environments/digital-prison-reporting/modules/domains/reload-pipeline/pipeline.tf b/terraform/environments/digital-prison-reporting/modules/domains/reload-pipeline/pipeline.tf index 0c08a0b111b..5ca48bbef99 100644 --- a/terraform/environments/digital-prison-reporting/modules/domains/reload-pipeline/pipeline.tf +++ b/terraform/environments/digital-prison-reporting/modules/domains/reload-pipeline/pipeline.tf @@ -49,11 +49,12 @@ module "reload_pipeline" { }, "Check All Pending Files Have Been Processed" : { "Type" : "Task", - "Resource" : "arn:aws:states:::glue:startJobRun", + "Resource" : "arn:aws:states:::glue:startJobRun.sync", "Parameters" : { "JobName" : var.glue_unprocessed_raw_files_check_job, "Arguments" : { - "--dpr.orchestration.wait.interval.seconds" : "60" + "--dpr.orchestration.wait.interval.seconds" : tostring(var.processed_files_check_wait_interval_seconds), + "--dpr.orchestration.max.attempts" : tostring(var.processed_files_check_max_attempts) } }, "Next" : "Stop Glue Streaming Job" diff --git a/terraform/environments/digital-prison-reporting/modules/domains/reload-pipeline/variables.tf b/terraform/environments/digital-prison-reporting/modules/domains/reload-pipeline/variables.tf index ea22b35cf8c..28a3b7a73ac 100644 --- a/terraform/environments/digital-prison-reporting/modules/domains/reload-pipeline/variables.tf +++ b/terraform/environments/digital-prison-reporting/modules/domains/reload-pipeline/variables.tf @@ -238,6 +238,16 @@ variable "retention_curated_num_workers" { } } +variable "processed_files_check_wait_interval_seconds" { + description = "Amount of seconds between checks to s3 if all files have been processed" + type = number +} + +variable "processed_files_check_max_attempts" { + description = "Maximum number of attempts to check if all files have been processed" + type = number +} + variable "glue_s3_max_attempts" { description = "The maximum number of attempts when making requests to S3" type = number diff --git 
a/terraform/environments/digital-prison-reporting/modules/domains/replay-pipeline/pipeline.tf b/terraform/environments/digital-prison-reporting/modules/domains/replay-pipeline/pipeline.tf index ea6f367fa08..2042a0502b8 100644 --- a/terraform/environments/digital-prison-reporting/modules/domains/replay-pipeline/pipeline.tf +++ b/terraform/environments/digital-prison-reporting/modules/domains/replay-pipeline/pipeline.tf @@ -48,11 +48,12 @@ module "replay_pipeline" { }, "Check All Pending Files Have Been Processed" : { "Type" : "Task", - "Resource" : "arn:aws:states:::glue:startJobRun", + "Resource" : "arn:aws:states:::glue:startJobRun.sync", "Parameters" : { "JobName" : var.glue_unprocessed_raw_files_check_job, "Arguments" : { - "--dpr.orchestration.wait.interval.seconds" : "60" + "--dpr.orchestration.wait.interval.seconds" : tostring(var.processed_files_check_wait_interval_seconds), + "--dpr.orchestration.max.attempts" : tostring(var.processed_files_check_max_attempts) } }, "Next" : "Stop Glue Streaming Job" @@ -246,11 +247,12 @@ module "replay_pipeline" { }, "Check All Files Have Been Replayed" : { "Type" : "Task", - "Resource" : "arn:aws:states:::glue:startJobRun", + "Resource" : "arn:aws:states:::glue:startJobRun.sync", "Parameters" : { "JobName" : var.glue_unprocessed_raw_files_check_job, "Arguments" : { - "--dpr.orchestration.wait.interval.seconds" : "60" + "--dpr.orchestration.wait.interval.seconds" : tostring(var.processed_files_check_wait_interval_seconds), + "--dpr.orchestration.max.attempts" : tostring(var.processed_files_check_max_attempts) } }, "Next" : "Empty Raw Data" diff --git a/terraform/environments/digital-prison-reporting/modules/domains/replay-pipeline/variables.tf b/terraform/environments/digital-prison-reporting/modules/domains/replay-pipeline/variables.tf index f84fccb9b6c..b2d5ad19292 100644 --- a/terraform/environments/digital-prison-reporting/modules/domains/replay-pipeline/variables.tf +++ b/terraform/environments/digital-prison-reporting/modules/domains/replay-pipeline/variables.tf @@ -235,6 +235,16 @@ variable "retention_curated_num_workers" { } } +variable "processed_files_check_wait_interval_seconds" { + description = "Amount of seconds between checks to s3 if all files have been processed" + type = number +} + +variable "processed_files_check_max_attempts" { + description = "Maximum number of attempts to check if all files have been processed" + type = number +} + variable "glue_s3_max_attempts" { description = "The maximum number of attempts when making requests to S3" type = number diff --git a/terraform/environments/digital-prison-reporting/modules/domains/start-cdc-pipeline/versions.tf b/terraform/environments/digital-prison-reporting/modules/domains/start-cdc-pipeline/versions.tf new file mode 100644 index 00000000000..ea265eb2f9b --- /dev/null +++ b/terraform/environments/digital-prison-reporting/modules/domains/start-cdc-pipeline/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_providers { + aws = { + version = "~> 5.0" + source = "hashicorp/aws" + } + + } + required_version = "~> 1.0" +} diff --git a/terraform/environments/digital-prison-reporting/modules/domains/stop-cdc-pipeline/pipeline.tf b/terraform/environments/digital-prison-reporting/modules/domains/stop-cdc-pipeline/pipeline.tf index 92b1179f255..d0ef2ef0519 100644 --- a/terraform/environments/digital-prison-reporting/modules/domains/stop-cdc-pipeline/pipeline.tf +++ b/terraform/environments/digital-prison-reporting/modules/domains/stop-cdc-pipeline/pipeline.tf @@ -25,12 +25,12 @@ module 
"cdc_stop_pipeline" { }, "Check All Pending Files Have Been Processed" : { "Type" : "Task", - "Resource" : "arn:aws:states:::glue:startJobRun", + "Resource" : "arn:aws:states:::glue:startJobRun.sync", "Parameters" : { "JobName" : var.glue_unprocessed_raw_files_check_job, "Arguments" : { - "--dpr.orchestration.wait.interval.seconds" : "60" - "--dpr.orchestration.max.attempts" : "120" + "--dpr.orchestration.wait.interval.seconds" : tostring(var.processed_files_check_wait_interval_seconds), + "--dpr.orchestration.max.attempts" : tostring(var.processed_files_check_max_attempts) } }, "Next" : "Stop Glue Streaming Job" diff --git a/terraform/environments/digital-prison-reporting/modules/domains/stop-cdc-pipeline/variables.tf b/terraform/environments/digital-prison-reporting/modules/domains/stop-cdc-pipeline/variables.tf index 22af2a709f4..d6d60a9ca57 100644 --- a/terraform/environments/digital-prison-reporting/modules/domains/stop-cdc-pipeline/variables.tf +++ b/terraform/environments/digital-prison-reporting/modules/domains/stop-cdc-pipeline/variables.tf @@ -24,11 +24,6 @@ variable "glue_reporting_hub_cdc_jobname" { type = string } -variable "s3_glue_bucket_id" { - description = "S3, Glue Bucket ID" - type = string -} - variable "glue_stop_glue_instance_job" { description = "Name of job to stop the current running instance of the streaming job" type = string @@ -44,13 +39,18 @@ variable "glue_unprocessed_raw_files_check_job" { type = string } +variable "processed_files_check_wait_interval_seconds" { + description = "Amount of seconds between checks to s3 if all files have been processed" + type = number +} + +variable "processed_files_check_max_attempts" { + description = "Maximum number of attempts to check if all files have been processed" + type = number +} + variable "tags" { type = map(string) default = {} description = "(Optional) Key-value map of resource tags" -} - -variable "domain" { - type = string - description = "Domain Name" } \ No newline at end of file diff --git a/terraform/environments/digital-prison-reporting/modules/domains/stop-cdc-pipeline/versions.tf b/terraform/environments/digital-prison-reporting/modules/domains/stop-cdc-pipeline/versions.tf new file mode 100644 index 00000000000..ea265eb2f9b --- /dev/null +++ b/terraform/environments/digital-prison-reporting/modules/domains/stop-cdc-pipeline/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_providers { + aws = { + version = "~> 5.0" + source = "hashicorp/aws" + } + + } + required_version = "~> 1.0" +} diff --git a/terraform/environments/digital-prison-reporting/modules/rds/postgres/sg.tf b/terraform/environments/digital-prison-reporting/modules/rds/postgres/sg.tf index 2403b86bce7..7328ffb11a8 100644 --- a/terraform/environments/digital-prison-reporting/modules/rds/postgres/sg.tf +++ b/terraform/environments/digital-prison-reporting/modules/rds/postgres/sg.tf @@ -37,6 +37,7 @@ resource "aws_security_group_rule" "rule" { } resource "aws_security_group_rule" "rds_allow_all" { + #checkov:skip=CKV_AWS_382: "Ensure no security groups allow egress from 0.0.0.0:0 to port -1" count = var.enable_rds ? 
1 : 0 type = "egress" diff --git a/terraform/environments/digital-prison-reporting/modules/s3_bucket/main.tf b/terraform/environments/digital-prison-reporting/modules/s3_bucket/main.tf index 4cf22f0992a..4ecf5830546 100644 --- a/terraform/environments/digital-prison-reporting/modules/s3_bucket/main.tf +++ b/terraform/environments/digital-prison-reporting/modules/s3_bucket/main.tf @@ -31,77 +31,75 @@ resource "aws_s3_bucket_public_access_block" "storage" { restrict_public_buckets = true } -# Resource to define S3 bucket lifecycle configuration resource "aws_s3_bucket_lifecycle_configuration" "lifecycle" { - # Enable the lifecycle configuration only if the variable `enable_lifecycle` is true - count = var.enable_lifecycle ? 1 : 0 + # Create the lifecycle configuration if either lifecycle or Intelligent-Tiering is enabled + count = var.enable_lifecycle || var.enable_intelligent_tiering ? 1 : 0 + bucket = aws_s3_bucket.storage[0].id - # Main lifecycle rule for standard categories (short_term, long_term, temporary) - rule { - id = var.name - status = "Enabled" - - # Short-Term Retention Policy - # - Transitions objects to STANDARD_IA after 30 days (cost-effective storage for infrequent access). - # - Deletes objects after 90 days. - dynamic "transition" { - for_each = var.lifecycle_category == "short_term" ? [ { days = 30, storage_class = "STANDARD_IA" } ] : [] - content { - days = transition.value.days - storage_class = transition.value.storage_class + # Main lifecycle rule for standard categories (short_term, long_term, temporary, standard) + dynamic "rule" { + for_each = var.enable_lifecycle ? [1] : [] + content { + id = var.name + status = "Enabled" + + # Short-Term Retention Policy + dynamic "transition" { + for_each = var.lifecycle_category == "short_term" ? [{ days = 30, storage_class = "STANDARD_IA" }] : [] + content { + days = transition.value.days + storage_class = transition.value.storage_class + } } - } - dynamic "expiration" { - for_each = var.lifecycle_category == "short_term" ? [ { days = 90 } ] : ( - var.lifecycle_category == "temporary" ? [ { days = 30 } ] : []) - content { - days = expiration.value.days + # Standard Retention Policy: Move to STANDARD_IA after 30 days and remain there indefinitely + dynamic "transition" { + for_each = var.lifecycle_category == "standard" ? [{ days = 30, storage_class = "STANDARD_IA" }] : [] + content { + days = transition.value.days + storage_class = transition.value.storage_class + } + } + + # Expiration logic for short-term and temporary categories + dynamic "expiration" { + for_each = var.lifecycle_category == "short_term" ? [{ days = 90 }] : ( + var.lifecycle_category == "temporary" ? [{ days = 30 }] : []) + content { + days = expiration.value.days + } } - } - # Long-Term Retention Policy - # - Transitions objects to progressively cheaper storage classes: - # - STANDARD_IA after 60 days. - # - GLACIER after 180 days. - # - DEEP_ARCHIVE after 365 days. - # - Does not delete objects (no expiration). - dynamic "transition" { - for_each = var.lifecycle_category == "long_term" ? [ - { days = 60, storage_class = "STANDARD_IA" }, - { days = 180, storage_class = "GLACIER" }, - { days = 365, storage_class = "DEEP_ARCHIVE" } - ] : [] - content { - days = transition.value.days - storage_class = transition.value.storage_class + # Long-Term Retention Policy + dynamic "transition" { + for_each = var.lifecycle_category == "long_term" ? 
[ + { days = 30, storage_class = "STANDARD_IA" }, + { days = 180, storage_class = "GLACIER" }, + { days = 365, storage_class = "DEEP_ARCHIVE" } + ] : [] + content { + days = transition.value.days + storage_class = transition.value.storage_class + } } } } - # Dynamic rule for custom expiration rules - # - Allows adding additional lifecycle policies dynamically using the `override_expiration_rules` variable. - # - Each custom rule is defined with: - # - A unique prefix to filter objects (e.g., "reports/", "dpr/"). - # - An expiration time in days for objects under that prefix. - # - The `id` for each rule is derived dynamically based on the prefix (slashes `/` are replaced with dashes `-` for compatibility). - # - Rules are enabled or disabled based on the `enable_lifecycle_expiration` variable. - dynamic "rule" { - for_each = var.override_expiration_rules - content { - # Generate rule ID without worrying about trailing slashes in the prefix - id = "${var.name}-${rule.value.prefix}" - status = var.enable_lifecycle_expiration ? "Enabled" : "Disabled" + # Intelligent-Tiering rule (applied if enable_intelligent_tiering is true) + rule { + id = "${var.name}-intelligent-tiering" + status = var.enable_intelligent_tiering ? "Enabled" : "Disabled" - filter { - # Append '/' directly in the filter block to ensure proper prefix format - prefix = "${rule.value.prefix}/" - } + filter { + # Apply to all objects + prefix = "" + } - expiration { - days = rule.value.days - } + transition { + # Move objects to Intelligent-Tiering storage class + days = 0 # Immediately move to Intelligent-Tiering + storage_class = "INTELLIGENT_TIERING" } } } diff --git a/terraform/environments/digital-prison-reporting/modules/s3_bucket/variables.tf b/terraform/environments/digital-prison-reporting/modules/s3_bucket/variables.tf index 71b57b9545f..683e34f1677 100644 --- a/terraform/environments/digital-prison-reporting/modules/s3_bucket/variables.tf +++ b/terraform/environments/digital-prison-reporting/modules/s3_bucket/variables.tf @@ -115,7 +115,7 @@ variable "bucket_key" { default = true } -## Dynamic override_expiration_rules +## Dynamic override_expiration_rules variable "override_expiration_rules" { type = list(object({ prefix = string, days = number })) default = [] @@ -123,10 +123,16 @@ variable "override_expiration_rules" { variable "lifecycle_category" { type = string - default = "long_term" # Options: "short_term", "long_term", "temporary" + default = "standard" # Options: "short_term", "long_term", "temporary", "standard" } variable "enable_lifecycle_expiration" { description = "Enable item expiration - requires 'enable_lifecycle' and 'override_expiration_rules' to be defined/enabled." 
default = false +} + +variable "enable_intelligent_tiering" { + description = "Enable Intelligent-Tiering storage class for S3 bucket" + type = bool + default = false } \ No newline at end of file diff --git a/terraform/environments/digital-prison-reporting/modules/s3_bucket/versions.tf b/terraform/environments/digital-prison-reporting/modules/s3_bucket/versions.tf new file mode 100644 index 00000000000..14c498acddc --- /dev/null +++ b/terraform/environments/digital-prison-reporting/modules/s3_bucket/versions.tf @@ -0,0 +1,9 @@ +terraform { + required_providers { + aws = { + version = "~> 5.0" + source = "hashicorp/aws" + } + } + required_version = "~> 1.0" +} diff --git a/terraform/environments/digital-prison-reporting/observability-platform.tf b/terraform/environments/digital-prison-reporting/observability-platform.tf index 58035f4b337..17803453dc6 100644 --- a/terraform/environments/digital-prison-reporting/observability-platform.tf +++ b/terraform/environments/digital-prison-reporting/observability-platform.tf @@ -5,9 +5,9 @@ module "observability_platform_tenant" { source = "ministryofjustice/observability-platform-tenant/aws" version = "1.2.0" - observability_platform_account_id = local.environment_configuration.observability_platform_account_id + observability_platform_account_id = local.environment_management.account_ids["observability-platform-production"] enable_xray = true enable_prometheus = true tags = local.tags -} \ No newline at end of file +} diff --git a/terraform/environments/digital-prison-reporting/sg.tf b/terraform/environments/digital-prison-reporting/sg.tf index a00ec259001..20d12db345c 100644 --- a/terraform/environments/digital-prison-reporting/sg.tf +++ b/terraform/environments/digital-prison-reporting/sg.tf @@ -40,6 +40,7 @@ resource "aws_security_group_rule" "lambda_ingress_generic" { } resource "aws_security_group_rule" "lambda_egress_generic" { + #checkov:skip=CKV_AWS_382: "Ensure no security groups allow egress from 0.0.0.0:0 to port -1" count = local.enable_generic_lambda_sg ? 1 : 0 type = "egress" @@ -88,6 +89,7 @@ resource "aws_security_group_rule" "serverless_gw_ingress" { } resource "aws_security_group_rule" "serverless_gw_egress" { + #checkov:skip=CKV_AWS_382: "Ensure no security groups allow egress from 0.0.0.0:0 to port -1" count = local.enable_dbuilder_serverless_gw ? 1 : 0 type = "egress" @@ -102,6 +104,7 @@ resource "aws_security_group_rule" "serverless_gw_egress" { # VPC Gateway Endpoint SG resource "aws_security_group" "gateway_endpoint_sg" { #checkov:skip=CKV_AWS_23: "Ensure every security group and rule has a description" + #checkov:skip=CKV_AWS_382: "Ensure no security groups allow egress from 0.0.0.0:0 to port -1" count = local.include_dbuilder_gw_vpclink ? 
1 : 0 diff --git a/terraform/environments/edw/ec2.tf b/terraform/environments/edw/ec2.tf index 73dd891836b..480b063eb69 100644 --- a/terraform/environments/edw/ec2.tf +++ b/terraform/environments/edw/ec2.tf @@ -227,24 +227,30 @@ else fi #### Prevent timeout on DB + +# Increase ssh session timeout +sed -i 's/#ClientAliveInterval.*/ClientAliveInterval 1200/' /etc/ssh/sshd_config +sed -i 's/#ClientAliveCountMax.*/ClientAliveCountMax 5/' /etc/ssh/sshd_config +service sshd restart + + # Add TCP keepalive time to sysctl.conf ---> keepalive solution -echo "net.ipv4.tcp_keepalive_time = 300" >> /etc/sysctl.conf +echo "net.ipv4.tcp_keepalive_time = 120" >> /etc/sysctl.conf sysctl -p + # Add SQLNET.EXPIRE_TIME to sqlnet.ora ---> keepalive solution # Check if SQLNET.EXPIRE_TIME exists in the file and update it, otherwise add it if grep -q "^SQLNET.EXPIRE_TIME" /oracle/software/product/10.2.0/network/admin/sqlnet.ora; then - # If the line exists, update it to "SQLNET.EXPIRE_TIME = 1" - sed -i 's/^SQLNET\.EXPIRE_TIME.*/SQLNET.EXPIRE_TIME = 1/' /oracle/software/product/10.2.0/network/admin/sqlnet.ora + # If the line exists, update it to "SQLNET.EXPIRE_TIME = 2" + sed -i 's/^SQLNET\.EXPIRE_TIME.*/SQLNET.EXPIRE_TIME = 2/' /oracle/software/product/10.2.0/network/admin/sqlnet.ora else # If the line does not exist, append it to the end of the file echo "SQLNET.EXPIRE_TIME = 1" >> /oracle/software/product/10.2.0/network/admin/sqlnet.ora fi + # Modify tnsnames.ora to insert (ENABLE=broken) ---> keepalive solution -grep -q '(ENABLE *= *broken)' /oracle/software/product/10.2.0/network/admin/tnsnames.ora || sed -i '/(DESCRIPTION =/a\\ (ENABLE = broken)' /oracle/software/product/10.2.0/network/admin/tnsnames.ora -# Add inbound connection timeout option to sqlnet -grep -qxF "SQLNET.INBOUND_CONNECT_TIMEOUT = 0" /oracle/software/product/10.2.0/network/admin/sqlnet.ora || echo "SQLNET.INBOUND_CONNECT_TIMEOUT = 0" >> /oracle/software/product/10.2.0/network/admin/sqlnet.ora -# Add inbound connection timeout option to listener -grep -qxF "INBOUND_CONNECT_TIMEOUT_LISTENER = 0" /oracle/software/product/10.2.0/network/admin/listener.ora || echo "INBOUND_CONNECT_TIMEOUT_LISTENER = 0" >> /oracle/software/product/10.2.0/network/admin/listener.ora +grep -q '(ENABLE = broken)' /oracle/software/product/10.2.0/network/admin/tnsnames.ora || sed -i '/(DESCRIPTION =/a\\ (ENABLE = broken)' /oracle/software/product/10.2.0/network/admin/tnsnames.ora + sudo mkdir -p /var/opt/oracle chown oracle:dba /var/opt/oracle @@ -287,6 +293,7 @@ cat < /etc/cron.d/backup_cron 0 06 * * 01 /home/oracle/backup_scripts/rman_full_backup.sh $APPNAME 00 07,10,13,16 * * * /home/oracle/scripts/freespace_alert.sh 00,15,30,45 * * * * /home/oracle/scripts/pmon_check.sh +# 0 7 * * 1 /home/oracle/scripts/maat_05365_ware_db_changes.sh EOC3 chown root:root /etc/cron.d/backup_cron @@ -347,7 +354,7 @@ EOF ####### IAM role ####### resource "aws_iam_role" "edw_ec2_role" { - name = "${local.application_name}-ec2-instance-role" + name = "${local.application_name}-ec2-instance-role" tags = merge( local.tags, { diff --git a/terraform/environments/electronic-monitoring-data/ap_airflow_iam.tf b/terraform/environments/electronic-monitoring-data/ap_airflow_iam.tf index 46796740c63..0ec98bf90a6 100644 --- a/terraform/environments/electronic-monitoring-data/ap_airflow_iam.tf +++ b/terraform/environments/electronic-monitoring-data/ap_airflow_iam.tf @@ -82,30 +82,32 @@ module "load_cap_dw_database" { count = local.is-production ? 
1 : 0 source = "./modules/ap_airflow_load_data_iam_role" - name = "cap-dw" - environment = local.environment - database_name = "g4s-cap-dw" - path_to_data = "/g4s_cap_dw" - source_data_bucket = module.s3-dms-target-store-bucket.bucket - secret_code = jsondecode(data.aws_secretsmanager_secret_version.airflow_secret.secret_string)["oidc_cluster_identifier"] - oidc_arn = aws_iam_openid_connect_provider.analytical_platform_compute.arn - athena_dump_bucket = module.s3-athena-bucket.bucket - cadt_bucket = module.s3-create-a-derived-table-bucket.bucket + name = "cap-dw" + environment = local.environment + database_name = "g4s-cap-dw" + path_to_data = "/g4s_cap_dw" + source_data_bucket = module.s3-dms-target-store-bucket.bucket + secret_code = jsondecode(data.aws_secretsmanager_secret_version.airflow_secret.secret_string)["oidc_cluster_identifier"] + oidc_arn = aws_iam_openid_connect_provider.analytical_platform_compute.arn + athena_dump_bucket = module.s3-athena-bucket.bucket + cadt_bucket = module.s3-create-a-derived-table-bucket.bucket + max_session_duration = 12 * 60 * 60 } module "load_emsys_mvp_database" { count = local.is-production ? 1 : 0 source = "./modules/ap_airflow_load_data_iam_role" - name = "emsys-mvp" - environment = local.environment - database_name = "g4s-emsys-mvp" - path_to_data = "/g4s_emsys_mvp" - source_data_bucket = module.s3-dms-target-store-bucket.bucket - secret_code = jsondecode(data.aws_secretsmanager_secret_version.airflow_secret.secret_string)["oidc_cluster_identifier"] - oidc_arn = aws_iam_openid_connect_provider.analytical_platform_compute.arn - athena_dump_bucket = module.s3-athena-bucket.bucket - cadt_bucket = module.s3-create-a-derived-table-bucket.bucket + name = "emsys-mvp" + environment = local.environment + database_name = "g4s-emsys-mvp" + path_to_data = "/g4s_emsys_mvp" + source_data_bucket = module.s3-dms-target-store-bucket.bucket + secret_code = jsondecode(data.aws_secretsmanager_secret_version.airflow_secret.secret_string)["oidc_cluster_identifier"] + oidc_arn = aws_iam_openid_connect_provider.analytical_platform_compute.arn + athena_dump_bucket = module.s3-athena-bucket.bucket + cadt_bucket = module.s3-create-a-derived-table-bucket.bucket + max_session_duration = 12 * 60 * 60 } module "load_fep_database" { @@ -197,3 +199,19 @@ module "load_unstructured_atrium_database" { athena_dump_bucket = module.s3-athena-bucket.bucket cadt_bucket = module.s3-create-a-derived-table-bucket.bucket } + + +module "load_fms" { + count = local.is-test || local.is-production ? 
1 : 0 + source = "./modules/ap_airflow_load_data_iam_role" + + name = "fms" + environment = local.environment + database_name = "serco-fms" + path_to_data = "/serco/fms" + source_data_bucket = module.s3-raw-formatted-data-bucket.bucket + secret_code = jsondecode(data.aws_secretsmanager_secret_version.airflow_secret.secret_string)["oidc_cluster_identifier"] + oidc_arn = aws_iam_openid_connect_provider.analytical_platform_compute.arn + athena_dump_bucket = module.s3-athena-bucket.bucket + cadt_bucket = module.s3-create-a-derived-table-bucket.bucket +} diff --git a/terraform/environments/electronic-monitoring-data/cloud_platform_share.tf b/terraform/environments/electronic-monitoring-data/cloud_platform_share.tf new file mode 100644 index 00000000000..e3bd9c71f9d --- /dev/null +++ b/terraform/environments/electronic-monitoring-data/cloud_platform_share.tf @@ -0,0 +1,69 @@ +locals { + # Setting the IAM name that our Cloud Platform API will use to connect to this role + + iam-dev = local.environment_shorthand == "dev" ? var.cloud-platform-iam-dev : "" + iam-test = local.environment_shorthand == "test" ? var.cloud-platform-iam-preprod : "" + iam-preprod = local.environment_shorthand == "preprod" ? var.cloud-platform-iam-preprod : "" + iam-prod = local.environment_shorthand == "prod" ? var.cloud-platform-iam-prod : "" + + resolved-cloud-platform-iam-role = coalesce(local.iam-dev, local.iam-test, local.iam-preprod, local.iam-prod) +} + +variable "cloud-platform-iam-dev" { + type = string + description = "IAM role that our API in Cloud Platform will use to connect to this role." + default = "arn:aws:iam::754256621582:role/cloud-platform-irsa-6ab6c596b45e90b3-live" +} + +variable "cloud-platform-iam-preprod" { + type = string + description = "IAM role that our API in Cloud Platform will use to connect to this role." + default = "arn:aws:iam::754256621582:role/cloud-platform-irsa-bca231f5681d29c6-live" +} + +variable "cloud-platform-iam-prod" { + type = string + description = "IAM role that our API in Cloud Platform will use to connect to this role." 
+ default = "arn:aws:iam::754256621582:role/cloud-platform-irsa-7a81f92a48491ef0-live" +} + +module "cmt_front_end_assumable_role" { + #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions + #checkov:skip=CKV_TF_2:Module registry does not support tags for versions + source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role" + version = "5.48.0" + + trusted_role_arns = [ + local.resolved-cloud-platform-iam-role + ] + + create_role = true + role_requires_mfa = false + + role_name = "cmt_read_emds_data_${local.environment_shorthand}" + + tags = local.tags +} + +# module "share_api_data_marts" { +# #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions +# #checkov:skip=CKV_TF_2:Module registry does not support tags for versions +# source = "github.com/ministryofjustice/terraform-aws-analytical-platform-lakeformation?ref=32525da937012178e430585ac5a00f05193f58eb" +# data_locations = [{ +# data_location = module.s3-create-a-derived-table-bucket.bucket.arn +# register = true +# share = true +# hybrid_mode = false # will be managed exclusively in LakeFormation +# principal = module.cmt_front_end_assumable_role.iam_role_arn +# }] + +# databases_to_share = [{ +# name = "api_data_marts" +# principal = module.cmt_front_end_assumable_role.iam_role_arn +# }] + +# providers = { +# aws.source = aws +# aws.destination = aws +# } +# } diff --git a/terraform/environments/electronic-monitoring-data/dms_data_validation_glue_job_v2.tf b/terraform/environments/electronic-monitoring-data/dms_data_validation_glue_job_v2.tf index a36b3d400fd..787fc2183e2 100644 --- a/terraform/environments/electronic-monitoring-data/dms_data_validation_glue_job_v2.tf +++ b/terraform/environments/electronic-monitoring-data/dms_data_validation_glue_job_v2.tf @@ -29,31 +29,31 @@ resource "aws_glue_job" "dms_dv_rds_to_s3_parquet_v1" { worker_type = "G.1X" number_of_workers = 4 default_arguments = { - "--script_bucket_name" = module.s3-glue-job-script-bucket.bucket.id - "--rds_db_host_ep" = split(":", aws_db_instance.database_2022.endpoint)[0] - "--rds_db_pwd" = aws_db_instance.database_2022.password - "--rds_sqlserver_db" = "" - "--rds_sqlserver_db_schema" = "dbo" - "--rds_exclude_db_tbls" = "" - "--rds_select_db_tbls" = "" - "--rds_db_tbl_pkeys_col_list" = "" - "--rds_df_trim_str_columns" = "false" - "--rds_df_trim_micro_sec_ts_col_list" = "" - "--num_of_repartitions" = 0 - "--read_partition_size_mb" = 128 - "--max_table_size_mb" = 4000 - "--parquet_tbl_folder_if_different" = "" - "--extra-py-files" = "s3://${module.s3-glue-job-script-bucket.bucket.id}/${aws_s3_object.aws_s3_object_pyzipfile_to_s3folder.id}" - "--parquet_src_bucket_name" = module.s3-dms-target-store-bucket.bucket.id - "--parquet_output_bucket_name" = module.s3-dms-data-validation-bucket.bucket.id - "--glue_catalog_db_name" = aws_glue_catalog_database.dms_dv_glue_catalog_db.name - "--glue_catalog_tbl_name" = "glue_df_output" - "--continuous-log-logGroup" = "/aws-glue/jobs/${aws_cloudwatch_log_group.dms_dv_rds_to_s3_parquet_v1.name}" - "--enable-continuous-cloudwatch-log" = "true" - "--enable-continuous-log-filter" = "true" - "--enable-metrics" = "true" - "--enable-auto-scaling" = "true" - "--conf" = <> given_skip_columns_comparison_list = {given_skip_columns_comparison_list}<<""") + + select_compare_columns = [col for col in df_rds_temp.columns + if col not in given_skip_columns_comparison_list] + LOGGER.warn(f""">> Only the below selected columns are compared \n{select_compare_columns}<<""") + 
skip_columns_msg = f"""; columns_skipped = {given_skip_columns_comparison_list}""" + + final_select_columns = df_rds_temp.columns if select_compare_columns is None \ + else select_compare_columns + + df_rds_temp = df_rds_temp.select(*final_select_columns) df_rds_temp_t1 = df_rds_temp.selectExpr( *CustomPysparkMethods.get_nvl_select_list( df_rds_temp, @@ -249,34 +268,8 @@ def process_dv_for_table(rds_jdbc_conn_obj, trim_str_msg = "; [str column(s) - extra spaces trimmed]" t2_rds_str_col_trimmed = True # ------------------------------------------------------- - - trim_ts_ms_msg = "" - t3_rds_ts_col_msec_trimmed = False - if args.get("rds_df_trim_micro_sec_ts_col_list", None) is not None: - - msg_prefix = f"""Given -> rds_df_trim_micro_sec_ts_col_list = {given_rds_df_trim_micro_seconds_col_list}""" - given_rds_df_trim_micro_seconds_col_str = args["rds_df_trim_micro_sec_ts_col_list"] - given_rds_df_trim_micro_seconds_col_list = [f"""{col.strip().strip("'").strip('"')}""" - for col in given_rds_df_trim_micro_seconds_col_str.split(",")] - LOGGER.info(f"""{msg_prefix}, {type(given_rds_df_trim_micro_seconds_col_list)}""") - - if t2_rds_str_col_trimmed == True: - df_rds_temp_t3 = CustomPysparkMethods.rds_df_trim_microseconds_timestamp( - df_rds_temp_t2, - given_rds_df_trim_micro_seconds_col_list) - else: - df_rds_temp_t3 = CustomPysparkMethods.rds_df_trim_microseconds_timestamp( - df_rds_temp_t1, - given_rds_df_trim_micro_seconds_col_list) - # ------------------------------------------------------- - - trim_ts_ms_msg = "; [timestamp column(s) - micro-seconds trimmed]" - t3_rds_ts_col_msec_trimmed = True - # ------------------------------------------------------- - - if t3_rds_ts_col_msec_trimmed: - df_rds_temp_t4 = df_rds_temp_t3 - elif t2_rds_str_col_trimmed: + + if t2_rds_str_col_trimmed: df_rds_temp_t4 = df_rds_temp_t2 else: df_rds_temp_t4 = df_rds_temp_t1 @@ -284,6 +277,7 @@ def process_dv_for_table(rds_jdbc_conn_obj, df_rds_temp_t5 = df_rds_temp_t4.cache() + df_prq_temp = df_prq_temp.select(*final_select_columns) df_prq_temp_t1 = df_prq_temp.selectExpr( *CustomPysparkMethods.get_nvl_select_list( df_rds_temp, @@ -296,6 +290,7 @@ def process_dv_for_table(rds_jdbc_conn_obj, df_prq_temp_count = df_prq_temp_t1.count() # ------------------------------------------------------- + validated_msg = f"""{rds_tbl_name} - Validated.\n{skip_columns_msg}\n{trim_str_msg}""" if df_rds_temp_count == df_prq_temp_count: df_rds_prq_subtract_t1 = df_rds_temp_t5.subtract(df_prq_temp_t1) @@ -305,7 +300,7 @@ def process_dv_for_table(rds_jdbc_conn_obj, df_temp = df_dv_output.selectExpr( "current_timestamp as run_datetime", "'' as json_row", - f"""'{rds_tbl_name} - Validated.\n{trim_str_msg}\n{trim_ts_ms_msg}' as validation_msg""", + f""""{validated_msg}" as validation_msg""", f"""'{rds_db_name}' as database_name""", f"""'{db_sch_tbl}' as full_table_name""", """'False' as table_to_ap""" @@ -314,7 +309,10 @@ def process_dv_for_table(rds_jdbc_conn_obj, df_dv_output = df_dv_output.union(df_temp) else: df_subtract_temp = (df_rds_prq_subtract_t1 - .withColumn('json_row', F.to_json(F.struct(*[F.col(c) for c in df_rds_temp.columns]))) + .withColumn('json_row', + F.to_json( + F.struct(*[F.col(c) + for c in df_rds_temp.columns]))) .selectExpr("json_row") .limit(100)) @@ -322,7 +320,7 @@ def process_dv_for_table(rds_jdbc_conn_obj, df_subtract_temp = df_subtract_temp.selectExpr( "current_timestamp as run_datetime", "json_row", - f""""{subtract_validation_msg} - Dataframe(s)-Subtract Non-Zero Row Count!" 
as validation_msg""", + f""""{subtract_validation_msg}: - Rows not matched!" as validation_msg""", f"""'{rds_db_name}' as database_name""", f"""'{db_sch_tbl}' as full_table_name""", """'False' as table_to_ap""" @@ -536,7 +534,7 @@ def write_parquet_to_s3(df_dv_output: DataFrame, database, table): total_files, total_size = S3Methods.get_s3_folder_info( PRQ_FILES_SRC_S3_BUCKET_NAME, - f"{rds_db_name}/{rds_sqlserver_db_schema}/{rds_tbl_name}") + f"{rds_db_name}/{rds_sqlserver_db_schema}/{rds_tbl_name}/") total_size_mb = total_size/1024/1024 # ------------------------------------------------------- diff --git a/terraform/environments/electronic-monitoring-data/glue-job/dms_dv_rds_to_s3_parquet_v2.py b/terraform/environments/electronic-monitoring-data/glue-job/dms_dv_rds_to_s3_parquet_v2.py index 610779f27f1..3503518d11d 100644 --- a/terraform/environments/electronic-monitoring-data/glue-job/dms_dv_rds_to_s3_parquet_v2.py +++ b/terraform/environments/electronic-monitoring-data/glue-job/dms_dv_rds_to_s3_parquet_v2.py @@ -653,7 +653,7 @@ def write_parquet_to_s3(df_dv_output: DataFrame, database, db_sch_tbl_name): total_files, total_size = S3Methods.get_s3_folder_info( PRQ_FILES_SRC_S3_BUCKET_NAME, - f"{rds_db_name}/{rds_sqlserver_db_schema}/{rds_sqlserver_db_table}") + f"{rds_db_name}/{rds_sqlserver_db_schema}/{rds_sqlserver_db_table}/") total_size_mb = total_size/1024/1024 LOGGER.warn(f""">> '{db_sch_tbl}' Size: {total_size_mb} MB <<""") diff --git a/terraform/environments/electronic-monitoring-data/glue-job/etl_dv_rds_to_s3_parquet_partitionby_yyyy_mm.py b/terraform/environments/electronic-monitoring-data/glue-job/etl_dv_rds_to_s3_parquet_partitionby_yyyy_mm.py index f7db41b2546..1199cecfcb0 100644 --- a/terraform/environments/electronic-monitoring-data/glue-job/etl_dv_rds_to_s3_parquet_partitionby_yyyy_mm.py +++ b/terraform/environments/electronic-monitoring-data/glue-job/etl_dv_rds_to_s3_parquet_partitionby_yyyy_mm.py @@ -639,7 +639,7 @@ def write_to_s3_parquet(df_dv_output: DataFrame, LOGGER.info(f"""prq_table_folder_path = {prq_table_folder_path}""") total_files, total_size = S3Methods.get_s3_folder_info(PARQUET_OUTPUT_S3_BUCKET_NAME, - prq_table_folder_path) + f"{prq_table_folder_path}/") msg_part_1 = f"""> total_files={total_files}""" msg_part_2 = f"""> total_size_mb={total_size/1024/1024:.2f}""" LOGGER.info(f"""{msg_part_1}, {msg_part_2}""") diff --git a/terraform/environments/electronic-monitoring-data/glue-job/etl_rds_sqlserver_query_to_s3_parquet.py b/terraform/environments/electronic-monitoring-data/glue-job/etl_rds_sqlserver_query_to_s3_parquet.py new file mode 100644 index 00000000000..c3fdffe3607 --- /dev/null +++ b/terraform/environments/electronic-monitoring-data/glue-job/etl_rds_sqlserver_query_to_s3_parquet.py @@ -0,0 +1,473 @@ + +import sys + +# from logging import getLogger +# import pandas as pd + +from glue_data_validation_lib import RDSConn_Constants +from glue_data_validation_lib import SparkSession +from glue_data_validation_lib import Logical_Constants +from glue_data_validation_lib import RDS_JDBC_CONNECTION +from glue_data_validation_lib import S3Methods +from glue_data_validation_lib import CustomPysparkMethods +from rds_transform_queries import SQLServer_Extract_Transform + +from awsglue.utils import getResolvedOptions +from awsglue.transforms import * + +from awsglue.dynamicframe import DynamicFrame +from awsglue.job import Job + +# from pyspark.conf import SparkConf +from pyspark.sql import DataFrame +import pyspark.sql.functions as F +import pyspark.sql.types as 
T + +# from pyspark.storagelevel import StorageLevel + +# =============================================================================== + +sc = SparkSession.sc +sc._jsc.hadoopConfiguration().set("spark.dynamicAllocation.enabled", "true") + +spark = SparkSession.spark + +glueContext = SparkSession.glueContext +LOGGER = glueContext.get_logger() + +# =============================================================================== + +# =============================================================================== + +# Organise capturing input parameters. +DEFAULT_INPUTS_LIST = ["JOB_NAME", + "script_bucket_name", + "rds_db_host_ep", + "rds_db_pwd", + "jdbc_read_partitions_num", + "rds_sqlserver_db", + "rds_sqlserver_db_schema", + "rds_sqlserver_db_table", + "rds_db_tbl_pkey_column", + "rds_df_repartition_num", + "rds_to_parquet_output_s3_bucket", + "validation_only_run", + "validation_sample_fraction_float", + "validation_sample_df_repartition_num", + "glue_catalog_db_name", + "glue_catalog_tbl_name", + "glue_catalog_dv_bucket" + ] + +OPTIONAL_INPUTS = [ + "rename_migrated_prq_tbl_folder" +] + +AVAILABLE_ARGS_LIST = CustomPysparkMethods.resolve_args(DEFAULT_INPUTS_LIST+OPTIONAL_INPUTS) + +args = getResolvedOptions(sys.argv, AVAILABLE_ARGS_LIST) + +# ------------------------------ + +job = Job(glueContext) +job.init(args["JOB_NAME"], args) + +# ------------------------------ + +RDS_DB_HOST_ENDPOINT = args["rds_db_host_ep"] +RDS_DB_PORT = RDSConn_Constants.RDS_DB_PORT +RDS_DB_INSTANCE_USER = RDSConn_Constants.RDS_DB_INSTANCE_USER +RDS_DB_INSTANCE_PWD = args["rds_db_pwd"] +RDS_DB_INSTANCE_DRIVER = RDSConn_Constants.RDS_DB_INSTANCE_DRIVER + +PARQUET_OUTPUT_S3_BUCKET_NAME = args["rds_to_parquet_output_s3_bucket"] + +GLUE_CATALOG_DB_NAME = args["glue_catalog_db_name"] +GLUE_CATALOG_TBL_NAME = args["glue_catalog_tbl_name"] +GLUE_CATALOG_DV_BUCKET = args["glue_catalog_dv_bucket"] + +CATALOG_DB_TABLE_PATH = f"""{GLUE_CATALOG_DB_NAME}/{GLUE_CATALOG_TBL_NAME}""" +CATALOG_TABLE_S3_FULL_PATH = f'''s3://{GLUE_CATALOG_DV_BUCKET}/{CATALOG_DB_TABLE_PATH}''' + + +NVL_DTYPE_DICT = Logical_Constants.NVL_DTYPE_DICT + +INT_DATATYPES_LIST = Logical_Constants.INT_DATATYPES_LIST + +RECORDED_PKEYS_LIST = Logical_Constants.RECORDED_PKEYS_LIST + +QUERY_STR_DICT = SQLServer_Extract_Transform.QUERY_STR_DICT + +# ================================================================== +# USER-DEFINED-FUNCTIONS +# ---------------------- + +def print_existing_s3parquet_stats(prq_table_folder_path): + total_files, total_size = S3Methods.get_s3_folder_info( + PARQUET_OUTPUT_S3_BUCKET_NAME, + prq_table_folder_path) + + msg_part_1 = f"""> total_files={total_files}""" + msg_part_2 = f"""> total_size_mb={total_size/1024/1024:.2f}""" + LOGGER.info(f"""{msg_part_1}, {msg_part_2}""") + + +def compare_rds_parquet_samples(rds_jdbc_conn_obj, + rds_db_table_name, + df_rds_query_read: DataFrame, + jdbc_partition_column, + prq_table_folder_path, + validation_sample_fraction_float) -> DataFrame: + + df_dv_output_schema = T.StructType( + [T.StructField("run_datetime", T.TimestampType(), True), + T.StructField("json_row", T.StringType(), True), + T.StructField("validation_msg", T.StringType(), True), + T.StructField("database_name", T.StringType(), True), + T.StructField("full_table_name", T.StringType(), True), + T.StructField("table_to_ap", T.StringType(), True)]) + + df_dv_output = CustomPysparkMethods.get_pyspark_empty_df(df_dv_output_schema) + + s3_table_folder_path = f"""s3://{PARQUET_OUTPUT_S3_BUCKET_NAME}/{prq_table_folder_path}""" + 
LOGGER.info(f"""Parquet Source being used for comparison: {s3_table_folder_path}""") + + df_parquet_read = spark.read.schema(df_rds_query_read.schema).parquet(s3_table_folder_path) + + df_parquet_read_sample = df_parquet_read.sample(validation_sample_fraction_float) + + df_parquet_read_sample_t1 = df_parquet_read_sample.selectExpr( + *CustomPysparkMethods.get_nvl_select_list( + df_parquet_read_sample, + rds_jdbc_conn_obj, + rds_db_table_name + ) + ) + + validation_sample_df_repartition_num = int(args['validation_sample_df_repartition_num']) + if validation_sample_df_repartition_num != 0: + df_parquet_read_sample_t1 = df_parquet_read_sample_t1.repartition( + validation_sample_df_repartition_num, + jdbc_partition_column + ) + # -------- + + df_rds_read_sample = df_rds_query_read.join(df_parquet_read_sample, + on=jdbc_partition_column, + how='leftsemi') + + df_rds_read_sample_t1 = df_rds_read_sample.selectExpr( + *CustomPysparkMethods.get_nvl_select_list( + df_rds_read_sample, + rds_jdbc_conn_obj, + rds_db_table_name + ) + ) + if validation_sample_df_repartition_num != 0: + df_rds_read_sample_t1 = df_rds_read_sample_t1.repartition( + validation_sample_df_repartition_num, + jdbc_partition_column + ) + # -------- + + df_prq_leftanti_rds = df_parquet_read_sample_t1.alias("L")\ + .join(df_rds_read_sample_t1.alias("R"), + on=df_parquet_read_sample_t1.columns, + how='leftanti') + + # df_prq_leftanti_rds = df_parquet_read_sample_t1.alias("L")\ + # .join(df_rds_read_sample_t1.alias("R"), + # on=jdbc_partition_column, how='left')\ + # .where(" or ".join([f"L.{column} != R.{column}" + # for column in df_rds_read_sample_t1.columns + # if column != jdbc_partition_column]))\ + # .select("L.*") + + df_prq_read_filtered_count = df_prq_leftanti_rds.count() + + LOGGER.info(f"""Rows sample taken = {df_parquet_read_sample.count()}""") + + if df_prq_read_filtered_count == 0: + temp_msg = f"""{validation_sample_fraction_float} - Sample Rows Validated.""" + df_temp_row = spark.sql(f"""select + current_timestamp() as run_datetime, + '' as json_row, + "{temp_msg}" as validation_msg, + '{rds_jdbc_conn_obj.rds_db_name}' as database_name, + '{db_sch_tbl}' as full_table_name, + 'False' as table_to_ap + """.strip()) + + LOGGER.info(f"{rds_db_table_name}: Validation Successful - 1") + df_dv_output = df_dv_output.union(df_temp_row) + else: + + LOGGER.warn( + f"""Parquet-RDS Subtract Report: ({df_prq_read_filtered_count}): Row(s) differences found!""") + + df_subtract_temp = (df_prq_leftanti_rds + .withColumn('json_row', F.to_json(F.struct(*[F.col(c) + for c in df_rds_query_read.columns]))) + .selectExpr("json_row") + .limit(100)) + + temp_msg = f"""{validation_sample_fraction_float}-Rows Sample Used:\n""" + df_subtract_temp = df_subtract_temp.selectExpr( + "current_timestamp as run_datetime", + "json_row", + f""""{temp_msg}>{df_prq_read_filtered_count} Rows - Validation Failed !" 
as validation_msg""", + f"""'{rds_jdbc_conn_obj.rds_db_name}' as database_name""", + f"""'{db_sch_tbl}' as full_table_name""", + """'False' as table_to_ap""" + ) + LOGGER.warn(f"{rds_db_table_name}: Validation Failed - 2") + df_dv_output = df_dv_output.union(df_subtract_temp) + # ----------------------------------------------------- + + return df_dv_output + + +def write_rds_to_s3parquet(df_rds_query_read: DataFrame, prq_table_folder_path): + + s3_table_folder_path = f"""s3://{PARQUET_OUTPUT_S3_BUCKET_NAME}/{prq_table_folder_path}""" + + if S3Methods.check_s3_folder_path_if_exists(PARQUET_OUTPUT_S3_BUCKET_NAME, + prq_table_folder_path): + + LOGGER.info(f"""Purging S3-path: {s3_table_folder_path}""") + glueContext.purge_s3_path(s3_table_folder_path, options={"retentionPeriod": 0}) + # -------------------------------------------------------------------- + + dydf = DynamicFrame.fromDF(df_rds_query_read, glueContext, "final_spark_df") + + glueContext.write_dynamic_frame.from_options(frame=dydf, connection_type='s3', format='parquet', + connection_options={ + 'path': f"""{s3_table_folder_path}/""" + }, + format_options={ + 'useGlueParquetWriter': True, + 'compression': 'snappy', + 'blockSize': 13421773, + 'pageSize': 1048576 + }) + LOGGER.info(f"""df_rds_query_read - dataframe written to -> {s3_table_folder_path}/""") + + +def write_dv_report_to_s3parquet(df_dv_output: DataFrame, + rds_jdbc_conn_obj, + db_sch_tbl_name): + + db_name = rds_jdbc_conn_obj.rds_db_name + df_dv_output = df_dv_output.repartition(1) + + prq_table_folder_path = f"""{args["glue_catalog_db_name"]}/{args["glue_catalog_tbl_name"]}""" + s3_table_folder_path = f'''s3://{GLUE_CATALOG_DV_BUCKET}/{prq_table_folder_path}''' + + if S3Methods.check_s3_folder_path_if_exists(GLUE_CATALOG_DV_BUCKET, + f'''{prq_table_folder_path}/database_name={db_name}/full_table_name={db_sch_tbl_name}''' + ): + LOGGER.info( + f"""Purging S3-path: {s3_table_folder_path}/database_name={db_name}/full_table_name={db_sch_tbl_name}""") + + glueContext.purge_s3_path(f"""{s3_table_folder_path}/database_name={db_name}/full_table_name={db_sch_tbl_name}""", + options={"retentionPeriod": 0} + ) + # --------------------------------------------------------------------- + + dydf = DynamicFrame.fromDF(df_dv_output, glueContext, "final_spark_df") + + glueContext.write_dynamic_frame.from_options(frame=dydf, connection_type='s3', format='parquet', + connection_options={ + 'path': f"""{s3_table_folder_path}/""", + "partitionKeys": ["database_name", "full_table_name"] + }, + format_options={ + 'useGlueParquetWriter': True, + 'compression': 'snappy', + 'blockSize': 13421773, + 'pageSize': 1048576 + }) + LOGGER.info( + f"""'{db_sch_tbl_name}' validation report written to -> {s3_table_folder_path}/""") + +# =================================================================================================== + + +if __name__ == "__main__": + + # ------------------------------------------- + if args.get("rds_sqlserver_db", None) is None: + LOGGER.error(f"""'rds_sqlserver_db' runtime input is missing! Exiting ...""") + sys.exit(1) + else: + rds_sqlserver_db = args["rds_sqlserver_db"] + LOGGER.info(f"""Given rds_sqlserver_db = {rds_sqlserver_db}""") + + if args.get("rds_sqlserver_db_schema", None) is None: + LOGGER.error( + f"""'rds_sqlserver_db_schema' runtime input is missing! 
Exiting ...""") + sys.exit(1) + else: + rds_sqlserver_db_schema = args["rds_sqlserver_db_schema"] + LOGGER.info( + f"""Given rds_sqlserver_db_schema = {rds_sqlserver_db_schema}""") + # ------------------------------------------- + + rds_jdbc_conn_obj = RDS_JDBC_CONNECTION(RDS_DB_HOST_ENDPOINT, + RDS_DB_INSTANCE_PWD, + rds_sqlserver_db, + rds_sqlserver_db_schema) + # ------------------------------------------- + + try: + rds_db_name = rds_jdbc_conn_obj.check_if_rds_db_exists()[0] + except IndexError: + LOGGER.error( + f"""Given database name not found! >> {args['rds_sqlserver_db']} <<""") + sys.exit(1) + except Exception as e: + LOGGER.error(e) + # ------------------------------------------------------- + + rds_sqlserver_db_tbl_list = rds_jdbc_conn_obj.get_rds_db_tbl_list() + if not rds_sqlserver_db_tbl_list: + LOGGER.error(f"""rds_sqlserver_db_tbl_list - is empty. Exiting ...!""") + sys.exit(1) + else: + message_prefix = f"""Total List of tables available in {rds_db_name}.{rds_sqlserver_db_schema}""" + LOGGER.info(f"""{message_prefix}\n{rds_sqlserver_db_tbl_list}""") + # ------------------------------------------------------- + + if args.get("rds_sqlserver_db_table", None) is None: + LOGGER.error( + f"""'rds_sqlserver_db_table' runtime input is missing! Exiting ...""") + sys.exit(1) + else: + rds_sqlserver_db_table = args["rds_sqlserver_db_table"] + table_name_prefix = f"""{rds_db_name}_{rds_sqlserver_db_schema}""" + db_sch_tbl = f"""{table_name_prefix}_{rds_sqlserver_db_table}""" + # ------------------------------------------------------- + + if db_sch_tbl not in rds_sqlserver_db_tbl_list: + LOGGER.error(f"""'{db_sch_tbl}' - is not an existing table! Exiting ...""") + sys.exit(1) + else: + LOGGER.info(f""">> Given RDS SqlServer-DB Table: {rds_sqlserver_db_table} <<""") + # ------------------------------------------------------- + + rds_db_tbl_pkey_column = args['rds_db_tbl_pkey_column'] + LOGGER.info(f"""rds_db_tbl_pkey_column = {rds_db_tbl_pkey_column}""") + # ----------------------------------------- + + rds_db_table_empty_df = rds_jdbc_conn_obj.get_rds_db_table_empty_df(rds_sqlserver_db_table) + + df_rds_dtype_dict = CustomPysparkMethods.get_dtypes_dict(rds_db_table_empty_df) + int_dtypes_colname_list = [colname for colname, dtype in df_rds_dtype_dict.items() + if dtype in INT_DATATYPES_LIST] + + if rds_db_tbl_pkey_column not in int_dtypes_colname_list: + LOGGER.error(f"""rds_db_tbl_pkey_column = {rds_db_tbl_pkey_column} is not an integer datatype column! 
+ """.strip()) + sys.exit(1) + # ---------------------------------------------------- + + jdbc_read_partitions_num = int(args.get('jdbc_read_partitions_num', 0)) + + jdbc_read_partitions_num = 1 if jdbc_read_partitions_num <= 0 \ + else jdbc_read_partitions_num + LOGGER.info(f"""jdbc_read_partitions_num = {jdbc_read_partitions_num}""") + + agg_row_dict = rds_jdbc_conn_obj.get_min_max_pkey_filter( + rds_sqlserver_db_table, + rds_db_tbl_pkey_column + ) + min_pkey = agg_row_dict['min_value'] + LOGGER.info(f"""min_pkey = {min_pkey}""") + + max_pkey = agg_row_dict['max_value'] + LOGGER.info(f"""max_pkey = {max_pkey}""") + + rds_transformed_query = QUERY_STR_DICT[f"{db_sch_tbl}"] + LOGGER.info(f"""rds_transformed_query = \n{rds_transformed_query}""") + + df_rds_query_read = rds_jdbc_conn_obj.get_rds_df_read_query_pkey_parallel( + rds_transformed_query, + rds_db_tbl_pkey_column, + min_pkey, + max_pkey, + jdbc_read_partitions_num + ) + + LOGGER.info( + f"""df_rds_query_read-{db_sch_tbl}: READ PARTITIONS = {df_rds_query_read.rdd.getNumPartitions()}""") + + df_rds_query_read_columns = df_rds_query_read.columns + LOGGER.info(f"""df_rds_query_read_columns = {df_rds_query_read_columns}""") + + df_rds_query_read_schema = df_rds_query_read.schema + LOGGER.info(f"""df_rds_query_read_schema = \n{[obj for obj in df_rds_query_read_schema]}""") + + rds_df_repartition_num = int(args['rds_df_repartition_num']) + + if rds_df_repartition_num != 0: + df_rds_query_read = df_rds_query_read.repartition(rds_df_repartition_num, + rds_db_tbl_pkey_column) + int_repartitions = df_rds_query_read.rdd.getNumPartitions() + LOGGER.info( + f"""df_rds_query_read: After Repartitioning -> {int_repartitions} partitions.""") + # ---------------------------------------------------- + + rename_output_table_folder = args.get('rename_migrated_prq_tbl_folder', None) + prq_table_folder_name = rds_sqlserver_db_table if rename_output_table_folder is None \ + else rename_output_table_folder + # --------------------------------------- + + prq_table_folder_path = f"""{rds_db_name}/{rds_sqlserver_db_schema}/{prq_table_folder_name}""" + LOGGER.info(f"""prq_table_folder_path = {prq_table_folder_path}""") + + validation_only_run = args['validation_only_run'] + + validation_sample_fraction_float = float(args.get('validation_sample_fraction_float', 0)) + validation_sample_fraction_float = 1.0 if validation_sample_fraction_float > 1 \ + else validation_sample_fraction_float + + temp_msg = f"""validation_sample_fraction_float = {validation_sample_fraction_float}""" + if validation_only_run != "true": + if validation_sample_fraction_float != 0: + df_rds_query_read = df_rds_query_read.cache() + write_rds_to_s3parquet(df_rds_query_read, prq_table_folder_path) + print_existing_s3parquet_stats(prq_table_folder_path) + LOGGER.info(f"""> Starting validation: {temp_msg}""") + df_dv_output = compare_rds_parquet_samples(rds_jdbc_conn_obj, + rds_sqlserver_db_table, + df_rds_query_read, + rds_db_tbl_pkey_column, + prq_table_folder_path, + validation_sample_fraction_float + ) + write_dv_report_to_s3parquet(df_dv_output, rds_jdbc_conn_obj, db_sch_tbl) + df_rds_query_read.unpersist() + else: + write_rds_to_s3parquet(df_rds_query_read, prq_table_folder_path) + print_existing_s3parquet_stats(prq_table_folder_path) + LOGGER.warn(f"""{temp_msg}\nValidation not enabled. 
Skipping ...""") + + else: + LOGGER.warn(f""">> validation_only_run - ENABLED <<""") + print_existing_s3parquet_stats(prq_table_folder_path) + + if validation_sample_fraction_float != 0: + LOGGER.info(f"""> Starting validation: {temp_msg}""") + df_dv_output = compare_rds_parquet_samples(rds_jdbc_conn_obj, + rds_sqlserver_db_table, + df_rds_query_read, + rds_db_tbl_pkey_column, + prq_table_folder_path, + validation_sample_fraction_float + ) + write_dv_report_to_s3parquet(df_dv_output, rds_jdbc_conn_obj, db_sch_tbl) + else: + LOGGER.warn(f"""{temp_msg} => Skipping Validation !""") + # --------------------------------------------------------------- + + job.commit() diff --git a/terraform/environments/electronic-monitoring-data/glue-job/etl_rds_tbl_hash_rows_to_s3_prq_partitionby_yyyy_mm.py b/terraform/environments/electronic-monitoring-data/glue-job/etl_rds_tbl_hash_rows_to_s3_prq_partitionby_yyyy_mm.py new file mode 100644 index 00000000000..966d86fce1f --- /dev/null +++ b/terraform/environments/electronic-monitoring-data/glue-job/etl_rds_tbl_hash_rows_to_s3_prq_partitionby_yyyy_mm.py @@ -0,0 +1,420 @@ + +import sys + +# from logging import getLogger +# import pandas as pd + +from glue_data_validation_lib import RDSConn_Constants +from glue_data_validation_lib import SparkSession +from glue_data_validation_lib import Logical_Constants +from glue_data_validation_lib import RDS_JDBC_CONNECTION +from glue_data_validation_lib import S3Methods +from glue_data_validation_lib import CustomPysparkMethods + +from awsglue.utils import getResolvedOptions +from awsglue.transforms import * + +from awsglue.dynamicframe import DynamicFrame +from awsglue.job import Job + +# from pyspark.conf import SparkConf +from pyspark.sql import DataFrame +import pyspark.sql.functions as F +# import pyspark.sql.types as T + +# from pyspark.storagelevel import StorageLevel + +# =============================================================================== + +sc = SparkSession.sc +sc._jsc.hadoopConfiguration().set("spark.dynamicAllocation.enabled", "true") + +spark = SparkSession.spark + +glueContext = SparkSession.glueContext +LOGGER = glueContext.get_logger() + +# =============================================================================== + + +# =============================================================================== + + +# Organise capturing input parameters. 
+DEFAULT_INPUTS_LIST = ["JOB_NAME", + "script_bucket_name", + "rds_db_host_ep", + "rds_db_pwd", + "rds_sqlserver_db", + "rds_sqlserver_db_schema", + "rds_sqlserver_db_table", + "rds_db_tbl_pkey_column", + "date_partition_column_name", + "parallel_jdbc_conn_num", + "rds_yyyy_mm_df_repartition_num", + "year_partition_bool", + "month_partition_bool", + "hashed_output_s3_bucket_name", + "rds_db_table_hashed_rows_parent_dir" + ] + +OPTIONAL_INPUTS = [ + "rds_query_where_clause", + "coalesce_int" +] + +AVAILABLE_ARGS_LIST = CustomPysparkMethods.resolve_args(DEFAULT_INPUTS_LIST+OPTIONAL_INPUTS) + +args = getResolvedOptions(sys.argv, AVAILABLE_ARGS_LIST) + +# ------------------------------ + +job = Job(glueContext) +job.init(args["JOB_NAME"], args) + +# ------------------------------ + +RDS_DB_HOST_ENDPOINT = args["rds_db_host_ep"] +RDS_DB_PORT = RDSConn_Constants.RDS_DB_PORT +RDS_DB_INSTANCE_USER = RDSConn_Constants.RDS_DB_INSTANCE_USER +RDS_DB_INSTANCE_PWD = args["rds_db_pwd"] +RDS_DB_INSTANCE_DRIVER = RDSConn_Constants.RDS_DB_INSTANCE_DRIVER + +HASHED_OUTPUT_S3_BUCKET_NAME = args["hashed_output_s3_bucket_name"] +RDS_DB_TABLE_HASHED_ROWS_PARENT_DIR = args["rds_db_table_hashed_rows_parent_dir"] + +ATHENA_RUN_OUTPUT_LOCATION = f"s3://{HASHED_OUTPUT_S3_BUCKET_NAME}/athena_temp_store/" + +INT_DATATYPES_LIST = Logical_Constants.INT_DATATYPES_LIST + +TBL_COLS_CONVERT_FMT_DICT = {'GPSPosition': + {'Latitude': 'CONVERT(VARCHAR(MAX), CONVERT(DECIMAL(10,7), Latitude))', + 'RecordedDatetime':'CONVERT(VARCHAR, RecordedDatetime, 120)', + 'AuditDateTime':'CONVERT(VARCHAR, AuditDateTime, 121)' + } + } + +# =============================================================================== + + +def write_rds_df_to_s3_parquet_v2(df_rds_write: DataFrame, + partition_by_cols, + prq_table_folder_path): + """ + Write dynamic frame in S3 and catalog it. + """ + + # s3://dms-rds-to-parquet-20240606144708618700000001/g4s_emsys_mvp/dbo/GPSPosition_V2/ + # s3://dms-rds-to-parquet-20240606144708618700000001/g4s_emsys_mvp/dbo/GPSPosition_V2/year=2019/month=10/ + + s3_table_folder_path = f"""s3://{HASHED_OUTPUT_S3_BUCKET_NAME}/{prq_table_folder_path}""" + + # Note: The below block of code erases the existing partition & use cautiously. 
+ # partition_path = f"""{s3_table_folder_path}/year=2019/month=10/""" + # if check_s3_folder_path_if_exists(PARQUET_OUTPUT_S3_BUCKET_NAME, partition_path): + + # LOGGER.info(f"""Purging S3-path: {partition_path}""") + # glueContext.purge_s3_path(partition_path, options={"retentionPeriod": 0}) + # # -------------------------------------------------------------------- + + dynamic_df_write = glueContext.getSink( + format_options={ + "compression": "snappy", + "useGlueParquetWriter": True + }, + path=f"""{s3_table_folder_path}/""", + connection_type="s3", + updateBehavior="UPDATE_IN_DATABASE", + partitionKeys=partition_by_cols, + enableUpdateCatalog=True, + transformation_ctx="dynamic_df_write", + ) + + catalog_db, catalog_db_tbl = prq_table_folder_path.split(f"""/{args['rds_sqlserver_db_schema']}/""") + dynamic_df_write.setCatalogInfo( + catalogDatabase=catalog_db.lower(), + catalogTableName=catalog_db_tbl.lower() + ) + + dynamic_df_write.setFormat("glueparquet") + + dydf_rds_read = DynamicFrame.fromDF(df_rds_write, glueContext, "final_spark_df") + dynamic_df_write.writeFrame(dydf_rds_read) + + LOGGER.info(f"""'{db_sch_tbl}' table data written to -> {s3_table_folder_path}/""") + + # ddl_refresh_table_partitions = f"msck repair table {catalog_db.lower()}.{catalog_db_tbl.lower()}" + # LOGGER.info(f"""ddl_refresh_table_partitions:> \n{ddl_refresh_table_partitions}""") + + # # Refresh table prtitions + # execution_id = run_athena_query(ddl_refresh_table_partitions) + # LOGGER.info(f"SQL-Statement execution id: {execution_id}") + + # # Check query execution + # query_status = has_query_succeeded(execution_id=execution_id) + # LOGGER.info(f"Query state: {query_status}") + + +def write_rds_df_to_s3_parquet(df_rds_write: DataFrame, + partition_by_cols, + prq_table_folder_path): + + # s3://dms-rds-to-parquet-20240606144708618700000001/g4s_cap_dw/dbo/F_History/ + + s3_table_folder_path = f"""s3://{HASHED_OUTPUT_S3_BUCKET_NAME}/{prq_table_folder_path}""" + + if S3Methods.check_s3_folder_path_if_exists(HASHED_OUTPUT_S3_BUCKET_NAME, + prq_table_folder_path): + + LOGGER.info(f"""Purging S3-path: {s3_table_folder_path}""") + glueContext.purge_s3_path(s3_table_folder_path, options={"retentionPeriod": 0}) + # -------------------------------------------------------------------- + + # catalog_db, catalog_db_tbl = prq_table_folder_path.split(f"""/{args['rds_sqlserver_db_schema']}/""") + + dydf = DynamicFrame.fromDF(df_rds_write, glueContext, "final_spark_df") + + glueContext.write_dynamic_frame.from_options(frame=dydf, connection_type='s3', format='parquet', + connection_options={ + 'path': f"""{s3_table_folder_path}/""", + "partitionKeys": partition_by_cols + }, + format_options={ + 'useGlueParquetWriter': True, + 'compression': 'snappy', + 'blockSize': 13421773, + 'pageSize': 1048576 + }) + LOGGER.info(f"""'{db_sch_tbl}' table data written to -> {s3_table_folder_path}/""") + +# =================================================================================================== + + +if __name__ == "__main__": + + # VERIFY GIVEN INPUTS - START + # ------------------------------------------- + + if args.get("rds_sqlserver_db", None) is None: + LOGGER.error(f"""'rds_sqlserver_db' runtime input is missing! Exiting ...""") + sys.exit(1) + else: + rds_sqlserver_db = args["rds_sqlserver_db"] + LOGGER.info(f"""Given rds_sqlserver_db = {rds_sqlserver_db}""") + + if args.get("rds_sqlserver_db_schema", None) is None: + LOGGER.error(f"""'rds_sqlserver_db_schema' runtime input is missing! 
Exiting ...""") + sys.exit(1) + else: + rds_sqlserver_db_schema = args["rds_sqlserver_db_schema"] + LOGGER.info(f"""Given rds_sqlserver_db_schema = {rds_sqlserver_db_schema}""") + # ------------------------------------------- + + rds_jdbc_conn_obj = RDS_JDBC_CONNECTION(RDS_DB_HOST_ENDPOINT, + RDS_DB_INSTANCE_PWD, + rds_sqlserver_db, + rds_sqlserver_db_schema) + + try: + rds_db_name = rds_jdbc_conn_obj.check_if_rds_db_exists()[0] + except IndexError: + LOGGER.error(f"""Given database name not found! >> {args['rds_sqlserver_db']} <<""") + sys.exit(1) + except Exception as e: + LOGGER.error(e) + # ------------------------------------------------------- + + rds_sqlserver_db_tbl_list = rds_jdbc_conn_obj.get_rds_db_tbl_list() + if not rds_sqlserver_db_tbl_list: + LOGGER.error(f"""rds_sqlserver_db_tbl_list - is empty. Exiting ...!""") + sys.exit(1) + else: + message_prefix = f"""Total List of tables available in {rds_db_name}.{rds_sqlserver_db_schema}""" + LOGGER.info(f"""{message_prefix}\n{rds_sqlserver_db_tbl_list}""") + # ------------------------------------------------------- + + if args.get("rds_sqlserver_db_table", None) is None: + LOGGER.error(f"""'rds_sqlserver_db_table' runtime input is missing! Exiting ...""") + sys.exit(1) + else: + rds_sqlserver_db_table = args["rds_sqlserver_db_table"] + table_name_prefix = f"""{rds_db_name}_{rds_sqlserver_db_schema}""" + db_sch_tbl = f"""{table_name_prefix}_{rds_sqlserver_db_table}""" + # -------------------------------------------------------------------- + + if db_sch_tbl not in rds_sqlserver_db_tbl_list: + LOGGER.error(f"""'{db_sch_tbl}' - is not an existing table! Exiting ...""") + sys.exit(1) + else: + LOGGER.info(f""">> Given RDS SqlServer-DB Table: {rds_sqlserver_db_table} <<""") + # ------------------------------------------------------- + + rds_db_tbl_pkey_column = args['rds_db_tbl_pkey_column'] + LOGGER.info(f""">> rds_db_tbl_pkey_column = {rds_db_tbl_pkey_column} <<""") + + rds_db_table_empty_df = rds_jdbc_conn_obj.get_rds_db_table_empty_df( + rds_sqlserver_db_table) + + df_rds_dtype_dict = CustomPysparkMethods.get_dtypes_dict(rds_db_table_empty_df) + int_dtypes_colname_list = [colname for colname, dtype in df_rds_dtype_dict.items() + if dtype in INT_DATATYPES_LIST] + + if rds_db_tbl_pkey_column not in int_dtypes_colname_list: + LOGGER.error( + f"""PrimaryKey column-'{rds_db_tbl_pkey_column}' is not an integer datatype !""") + sys.exit(1) + # --------------------------------------- + + all_columns_except_pkey = list() + conversion_col_list = list() + if TBL_COLS_CONVERT_FMT_DICT.get( + f"{rds_sqlserver_db_table}", None) is not None: + conversion_col_list = list( + TBL_COLS_CONVERT_FMT_DICT[ + f"{rds_sqlserver_db_table}"].keys() + ) + for e in rds_db_table_empty_df.schema.fields: + if e.name == rds_db_tbl_pkey_column: + continue + + if e.name in conversion_col_list: + all_columns_except_pkey.append( + TBL_COLS_CONVERT_FMT_DICT[f"{rds_sqlserver_db_table}"][f"{e.name}"] + ) + else: + all_columns_except_pkey.append(f"{e.name}") + + LOGGER.info(f""">> all_columns_except_pkey = {all_columns_except_pkey} <<""") + # --------------------------------------- + + date_partition_column_name = args['date_partition_column_name'] + LOGGER.info(f"""date_partition_column_name = {date_partition_column_name}""") + + parallel_jdbc_conn_num = int(args['parallel_jdbc_conn_num']) + LOGGER.info(f"""parallel_jdbc_conn_num = {parallel_jdbc_conn_num}""") + + rds_yyyy_mm_df_repartition_num = int(args['rds_yyyy_mm_df_repartition_num']) + 
LOGGER.info(f"""rds_yyyy_mm_df_repartition_num = {rds_yyyy_mm_df_repartition_num}""") + + yyyy_mm_partition_by_cols = list() + if args['year_partition_bool'] == 'true': + yyyy_mm_partition_by_cols.append("year") + + if args['month_partition_bool'] == 'true': + yyyy_mm_partition_by_cols.append("month") + + LOGGER.info(f"""yyyy_mm_partition_by_cols = {yyyy_mm_partition_by_cols}""") + + prq_table_folder_path = f""" + {RDS_DB_TABLE_HASHED_ROWS_PARENT_DIR}/{rds_db_name}/{rds_sqlserver_db_schema}/{rds_sqlserver_db_table}""".lstrip() + # ----------------------------------------- + # VERIFY GIVEN INPUTS - END + # ----------------------------------------- + + rds_query_where_clause = args.get('rds_query_where_clause', None) + if rds_query_where_clause is not None: + rds_query_where_clause = rds_query_where_clause.strip() + + agg_row_dict_list = rds_jdbc_conn_obj.get_min_max_groupby_month( + rds_sqlserver_db_table, + date_partition_column_name, + rds_db_tbl_pkey_column, + rds_query_where_clause + ) + LOGGER.info(f"""agg_row_dict_list:>\n{[agg_row_dict for agg_row_dict in agg_row_dict_list]}""") + + rds_db_hash_cols_query_str = f""" + SELECT {rds_db_tbl_pkey_column}, + LOWER(SUBSTRING(CONVERT(VARCHAR(66), + HASHBYTES('SHA2_256', CONCAT_WS('', {', '.join(all_columns_except_pkey)})), 1), 3, 66)) AS RowHash, + YEAR({date_partition_column_name}) AS year, + MONTH({date_partition_column_name}) AS month + FROM {rds_sqlserver_db_schema}.[{rds_sqlserver_db_table}] + """.strip() + + + for agg_row_dict in agg_row_dict_list: + + agg_row_year = agg_row_dict['year'] + agg_row_month = agg_row_dict['month'] + min_pkey_value = agg_row_dict['min_pkey_value'] + max_pkey_value = agg_row_dict['max_pkey_value'] + LOGGER.info(f"""agg_row_year = {agg_row_year}""") + LOGGER.info(f"""agg_row_month = {agg_row_month}""") + LOGGER.info(f"""min_pkey_value = {min_pkey_value}""") + LOGGER.info(f"""max_pkey_value = {max_pkey_value}""") + + pkey_between_clause_str_temp = f""" + WHERE {rds_db_tbl_pkey_column} between {min_pkey_value} and {max_pkey_value}""".strip() + + rds_db_select_query_str_temp = rds_db_hash_cols_query_str + pkey_between_clause_str_temp + LOGGER.info(f"""rds_db_select_query_str_temp = \n{rds_db_select_query_str_temp}""") + + rds_hashed_rows_df = rds_jdbc_conn_obj.get_rds_df_read_query_pkey_parallel( + rds_db_select_query_str_temp, + rds_db_tbl_pkey_column, + min_pkey_value, + max_pkey_value, + parallel_jdbc_conn_num + ) + # ---------------------------------------------------------- + temp_msg = f"""{agg_row_year}_{agg_row_month}-rds_hashed_rows_df""" + LOGGER.info( + f"""{temp_msg}: READ PARTITIONS = {rds_hashed_rows_df.rdd.getNumPartitions()}""") + + if 'year' in yyyy_mm_partition_by_cols \ + and 'year' not in rds_hashed_rows_df.columns: + rds_hashed_rows_df = rds_hashed_rows_df.withColumn( + "year", F.year(date_partition_column_name)) + + if 'month' in yyyy_mm_partition_by_cols \ + and 'month' not in rds_hashed_rows_df.columns: + rds_hashed_rows_df = rds_hashed_rows_df.withColumn( + "month", F.month(date_partition_column_name)) + + rds_hashed_rows_df = rds_hashed_rows_df.where( + f"""year = {agg_row_year} and month = {agg_row_month}""") + + if rds_yyyy_mm_df_repartition_num != 0: + # Note: Default 'partitionby_columns' values may not be appropriate for all the scenarios. + # So, the user can edit the list-'partitionby_columns' value(s) if required at runtime. + # Example: partitionby_columns = ['month'] + # The above scenario may be when the rds-source-dataframe filtered on single 'year' value. 
+ partitionby_columns = yyyy_mm_partition_by_cols + [rds_db_tbl_pkey_column] + + LOGGER.info(f"""{temp_msg}: Repartitioning on {partitionby_columns}""") + rds_hashed_rows_df = rds_hashed_rows_df.repartition(rds_yyyy_mm_df_repartition_num, *partitionby_columns) + + LOGGER.info( + f"""{temp_msg}: After Repartitioning -> {rds_hashed_rows_df.rdd.getNumPartitions()} partitions.""") + # ---------------------------------------------------- + + # Note: If many small size parquet files are created for each partition, + # consider using 'orderBy', 'coalesce' features appropriately before writing dataframe into S3 bucket. + # df_rds_write = rds_hashed_rows_df.coalesce(1) + + # NOTE: When filtered rows (ex: based on 'year') are used in separate consecutive batch runs, + # consider to appropriately use the parquet write functions with features in built as per the below details. + # - write_rds_df_to_s3_parquet(): Overwrites the existing partitions by default. + # - write_rds_df_to_s3_parquet_v2(): Adds the new partitions & also the corresponding partitions are updated in athena tables. + coalesce_int = int(args.get('coalesce_int', 0)) + if coalesce_int != 0: + LOGGER.warn(f"""{temp_msg}:> coalesce_int = {coalesce_int}""") + rds_hashed_rows_df_write = rds_hashed_rows_df.coalesce(coalesce_int) + else: + rds_hashed_rows_df_write = rds_hashed_rows_df.alias("rds_hashed_rows_df_write") + + write_rds_df_to_s3_parquet(rds_hashed_rows_df_write, + yyyy_mm_partition_by_cols, + prq_table_folder_path) + + LOGGER.info(f"""Partition - '{prq_table_folder_path}/{agg_row_year}/{agg_row_month}' writing completed.""") + # ----------------------------------------------- + + total_files, total_size = S3Methods.get_s3_folder_info(HASHED_OUTPUT_S3_BUCKET_NAME, + f"{prq_table_folder_path}/") + msg_part_1 = f"""total_files={total_files}""" + msg_part_2 = f"""total_size_mb={total_size/1024/1024:.2f}""" + LOGGER.info(f"""'{prq_table_folder_path}': {msg_part_1}, {msg_part_2}""") + + job.commit() diff --git a/terraform/environments/electronic-monitoring-data/glue-job/etl_rds_to_s3_parquet_partitionby_yyyy_mm.py b/terraform/environments/electronic-monitoring-data/glue-job/etl_rds_to_s3_parquet_partitionby_yyyy_mm.py index a9d461e091c..5003568823b 100644 --- a/terraform/environments/electronic-monitoring-data/glue-job/etl_rds_to_s3_parquet_partitionby_yyyy_mm.py +++ b/terraform/environments/electronic-monitoring-data/glue-job/etl_rds_to_s3_parquet_partitionby_yyyy_mm.py @@ -420,7 +420,7 @@ def write_rds_df_to_s3_parquet(df_rds_write: DataFrame, # ----------------------------------------------- total_files, total_size = S3Methods.get_s3_folder_info(PARQUET_OUTPUT_S3_BUCKET_NAME, - prq_table_folder_path) + f"{prq_table_folder_path}/") msg_part_1 = f"""total_files={total_files}""" msg_part_2 = f"""total_size_mb={total_size/1024/1024:.2f}""" LOGGER.info(f"""'{prq_table_folder_path}': {msg_part_1}, {msg_part_2}""") diff --git a/terraform/environments/electronic-monitoring-data/glue-job/etl_table_rows_hashvalue_to_parquet.py b/terraform/environments/electronic-monitoring-data/glue-job/etl_table_rows_hashvalue_to_parquet.py index bc6896c1eb6..ffba0e5cad5 100644 --- a/terraform/environments/electronic-monitoring-data/glue-job/etl_table_rows_hashvalue_to_parquet.py +++ b/terraform/environments/electronic-monitoring-data/glue-job/etl_table_rows_hashvalue_to_parquet.py @@ -201,7 +201,7 @@ def write_parquet_to_s3(hashed_rows_prq_df_write: DataFrame, hashed_rows_prq_ful FROM {rds_sqlserver_db_schema}.[{rds_sqlserver_db_table}] """.strip() - 
parallel_jdbc_conn_num = args['parallel_jdbc_conn_num'] + parallel_jdbc_conn_num = int(args['parallel_jdbc_conn_num']) parquet_df_write_repartition_num = int(args.get('parquet_df_write_repartition_num', 0)) diff --git a/terraform/environments/electronic-monitoring-data/glue-job/parquet_resize_or_partitionby_yyyy_mm_dd.py b/terraform/environments/electronic-monitoring-data/glue-job/parquet_resize_or_partitionby_yyyy_mm_dd.py index 5523b6a8560..6362f375934 100644 --- a/terraform/environments/electronic-monitoring-data/glue-job/parquet_resize_or_partitionby_yyyy_mm_dd.py +++ b/terraform/environments/electronic-monitoring-data/glue-job/parquet_resize_or_partitionby_yyyy_mm_dd.py @@ -215,7 +215,7 @@ def write_to_s3_parquet(df_prq_write: DataFrame, # ----------------------------------------------- total_files, total_size = S3Methods.get_s3_folder_info(PARQUET_WRITE_S3_BUCKET_NAME, - output_partition_path) + f"{output_partition_path}/") msg_part_1 = f"""total_files={total_files}""" msg_part_2 = f"""total_size_mb={total_size/1024/1024:.2f}""" LOGGER.info(f"""'{PRQ_WRITE_TABLE_FOLDER_PATH}': {msg_part_1}, {msg_part_2}""") diff --git a/terraform/environments/electronic-monitoring-data/glue-job/reusable_module/glue_data_validation_lib.py b/terraform/environments/electronic-monitoring-data/glue-job/reusable_module/glue_data_validation_lib.py index 3862c16a1b3..a78700aaac3 100644 --- a/terraform/environments/electronic-monitoring-data/glue-job/reusable_module/glue_data_validation_lib.py +++ b/terraform/environments/electronic-monitoring-data/glue-job/reusable_module/glue_data_validation_lib.py @@ -193,7 +193,7 @@ def get_rds_df_read_query_pkey_parallel(self, jdbc_partition_column, jdbc_partition_col_lowerbound, jdbc_partition_col_upperbound, - jdbc_read_partitions_num + jdbc_read_partitions_num=1 ) -> DataFrame: numPartitions = jdbc_read_partitions_num @@ -219,6 +219,16 @@ def get_rds_df_read_query_pkey_parallel(self, .option("numPartitions", numPartitions) .load()) + def get_rds_df_read_query(self, in_db_query) -> DataFrame: + + return (self.spark.read.format("jdbc") + .option("url", self.rds_jdbc_url_v2) + .option("driver", self.RDS_DB_INSTANCE_DRIVER) + .option("user", self.RDS_DB_INSTANCE_USER) + .option("password", self.RDS_DB_INSTANCE_PWD) + .option("dbtable", f"""({in_db_query}) as t""") + .load()) + def get_rds_df_query_min_max_count(self, rds_table_name, @@ -695,10 +705,8 @@ def get_rds_tbl_col_attr_dict(df_col_stats: DataFrame) -> DataFrame: def get_nvl_select_list(in_rds_df: DataFrame, rds_jdbc_conn_obj, in_rds_tbl_name): - df_col_attr = rds_jdbc_conn_obj.get_rds_tbl_col_attributes( - in_rds_tbl_name) - df_col_attr_dict = CustomPysparkMethods.get_rds_tbl_col_attr_dict( - df_col_attr) + df_col_attr = rds_jdbc_conn_obj.get_rds_tbl_col_attributes(in_rds_tbl_name) + df_col_attr_dict = CustomPysparkMethods.get_rds_tbl_col_attr_dict(df_col_attr) df_col_dtype_dict = CustomPysparkMethods.get_dtypes_dict(in_rds_df) temp_select_list = list() diff --git a/terraform/environments/electronic-monitoring-data/glue-job/reusable_module/rds_transform_queries.py b/terraform/environments/electronic-monitoring-data/glue-job/reusable_module/rds_transform_queries.py new file mode 100644 index 00000000000..a8b891f9618 --- /dev/null +++ b/terraform/environments/electronic-monitoring-data/glue-job/reusable_module/rds_transform_queries.py @@ -0,0 +1,60 @@ + +class SQLServer_Extract_Transform: + + QUERY_STR_DICT = { + + "g4s_cap_dw_dbo_D_Comments": """ + SELECT [CommentSID] + ,[VisitID] + ,[ActivityID] + 
,trim(replace(replace(Comments, char(141), ''), char(129), '')) AS Comments + ,[CommentType] + FROM [g4s_cap_dw].[dbo].[D_Comments] + """.strip(), + + "g4s_emsys_tpims_dbo_CurfewSegment": """ + SELECT [CurfewSegmentID] + ,[CurfewID] + ,[CurfewSegmentType] + ,[BeginDatetime] + ,[EndDatetime] + ,[LastModifiedDatetime] + ,[DayFlags] + ,[AdditionalInfo] + ,[WeeksOn] + ,[WeeksOff] + ,[WeeksOffset] + ,[ExportToGovernment] + ,[PublicHolidaySegmentID] + ,[IsPublicHoliday] + ,[RowVersion] + ,CAST(StartTime as varchar(8)) as StartTime + ,CAST(EndTime as varchar(8)) as EndTime + ,[SegmentCategoryLookupID] + ,[ParentCurfewSegmentID] + ,[TravelTimeBefore] + ,[TravelTimeAfter] + FROM [g4s_emsys_tpims].[dbo].[CurfewSegment] + """.strip(), + + "g4s_emsys_tpims_dbo_GPSPositionLatest": """ + SELECT [GPSPositionID] + ,[PersonID] + ,[DeviceID] + ,[Latitude] + ,[Longitude] + ,[RecordedDatetime] + ,[Source] + ,[Pdop] + ,[Hdop] + ,[Vdop] + ,[Speed] + ,[Direction] + ,[SequenceNumber] + ,[AuditDateTime] + , SpatialPosition.STAsText() AS SpatialPosition + ,[SeparationViolation] + FROM [g4s_emsys_tpims].[dbo].[GPSPositionLatest] + """.strip() + + } \ No newline at end of file diff --git a/terraform/environments/electronic-monitoring-data/lake_formation.tf b/terraform/environments/electronic-monitoring-data/lake_formation.tf new file mode 100644 index 00000000000..8c05daea3de --- /dev/null +++ b/terraform/environments/electronic-monitoring-data/lake_formation.tf @@ -0,0 +1,22 @@ +# ------------------------------------------------------------------------ +# Lake Formation - admin permissions +# https://user-guide.modernisation-platform.service.justice.gov.uk/runbooks/adding-admin-data-lake-formation-permissions.html +# ------------------------------------------------------------------------ + +data "aws_iam_role" "github_actions_role" { + name = "github-actions" +} + +data "aws_iam_roles" "modernisation_platform_sandbox_role" { + name_regex = "AWSReservedSSO_modernisation-platform-sandbox_.*" + path_prefix = "/aws-reserved/sso.amazonaws.com/" +} + +resource "aws_lakeformation_data_lake_settings" "emds_development" { + count = local.is-development ? 
1 : 0 + + admins = [ + "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-reserved/sso.amazonaws.com/${data.aws_region.current.name}/${one(data.aws_iam_roles.modernisation_platform_sandbox_role.names)}", + data.aws_iam_role.github_actions_role.arn + ] +} diff --git a/terraform/environments/electronic-monitoring-data/lambdas/create_athena_table.zip b/terraform/environments/electronic-monitoring-data/lambdas/create_athena_table.zip new file mode 100644 index 00000000000..5c34af4be11 Binary files /dev/null and b/terraform/environments/electronic-monitoring-data/lambdas/create_athena_table.zip differ diff --git a/terraform/environments/electronic-monitoring-data/lambdas/get_file_keys_for_table.zip b/terraform/environments/electronic-monitoring-data/lambdas/get_file_keys_for_table.zip new file mode 100644 index 00000000000..76e274356ac Binary files /dev/null and b/terraform/environments/electronic-monitoring-data/lambdas/get_file_keys_for_table.zip differ diff --git a/terraform/environments/electronic-monitoring-data/lambdas/query_output_to_list.zip b/terraform/environments/electronic-monitoring-data/lambdas/query_output_to_list.zip new file mode 100644 index 00000000000..49be0b66e2b Binary files /dev/null and b/terraform/environments/electronic-monitoring-data/lambdas/query_output_to_list.zip differ diff --git a/terraform/environments/electronic-monitoring-data/lambdas/send_table_to_ap.zip b/terraform/environments/electronic-monitoring-data/lambdas/send_table_to_ap.zip new file mode 100644 index 00000000000..b507fcf61e4 Binary files /dev/null and b/terraform/environments/electronic-monitoring-data/lambdas/send_table_to_ap.zip differ diff --git a/terraform/environments/electronic-monitoring-data/lambdas_iam.tf b/terraform/environments/electronic-monitoring-data/lambdas_iam.tf index 8dd8f5c058e..b452ed71980 100644 --- a/terraform/environments/electronic-monitoring-data/lambdas_iam.tf +++ b/terraform/environments/electronic-monitoring-data/lambdas_iam.tf @@ -492,58 +492,6 @@ resource "aws_iam_role" "rotate_iam_keys" { assume_role_policy = data.aws_iam_policy_document.lambda_assume_role.json } -#----------------------------------------------------------------------------------- -# Process landing bucket files -#----------------------------------------------------------------------------------- - -resource "aws_iam_role" "process_landing_bucket_files" { - name = "process_landing_bucket_files" - assume_role_policy = data.aws_iam_policy_document.lambda_assume_role.json -} - -data "aws_iam_policy_document" "process_landing_bucket_files_s3_policy_document" { - statement { - sid = "S3PermissionsForLandingBuckets" - effect = "Allow" - actions = [ - "s3:PutObjectTagging", - "s3:GetObject", - "s3:GetObjectTagging", - "s3:DeleteObject" - ] - resources = [ - "${module.s3-fms-general-landing-bucket.bucket_arn}/*", - "${module.s3-fms-specials-landing-bucket.bucket_arn}/*", - "${module.s3-mdss-general-landing-bucket.bucket_arn}/*", - "${module.s3-mdss-ho-landing-bucket.bucket_arn}/*", - "${module.s3-mdss-specials-landing-bucket.bucket_arn}/*", - ] - } - - statement { - sid = "S3PermissionsForReceivedFilesBucket" - effect = "Allow" - actions = [ - "s3:PutObject", - "s3:PutObjectTagging" - ] - resources = [ - "${module.s3-received-files-bucket.bucket.arn}/*", - ] - } -} - -resource "aws_iam_policy" "process_landing_bucket_files_s3" { - name = "process-landing-bucket-files-s3-policy" - description = "Policy for Lambda to create presigned url for unzipped file from S3" - policy = 
data.aws_iam_policy_document.process_landing_bucket_files_s3_policy_document.json -} - -resource "aws_iam_role_policy_attachment" "process_landing_bucket_files_s3_policy_policy_attachment" { - role = aws_iam_role.process_landing_bucket_files.name - policy_arn = aws_iam_policy.process_landing_bucket_files_s3.arn -} - #----------------------------------------------------------------------------------- # Virus scanning - definition upload #----------------------------------------------------------------------------------- @@ -631,3 +579,45 @@ resource "aws_iam_role_policy_attachment" "virus_scan_file_policy_attachment" { role = aws_iam_role.virus_scan_file.name policy_arn = aws_iam_policy.virus_scan_file.arn } + +#----------------------------------------------------------------------------------- +# Load FMS JSON data +#----------------------------------------------------------------------------------- + +resource "aws_iam_role" "format_json_fms_data" { + name = "format_json_fms_data" + assume_role_policy = data.aws_iam_policy_document.lambda_assume_role.json +} + +data "aws_iam_policy_document" "format_json_fms_data_policy_document" { + statement { + sid = "S3PermissionsForGetUnformattedJSONFiles" + effect = "Allow" + actions = [ + "s3:GetObject", + ] + resources = ["${module.s3-data-bucket.bucket.arn}/*"] + } + statement { + sid = "S3PermissionsForPutFormattedJSONFiles" + effect = "Allow" + actions = [ + "s3:PutObject", + "s3:PutObjectTagging", + ] + resources = [ + "${module.s3-raw-formatted-data-bucket.bucket.arn}/*", + ] + } +} + +resource "aws_iam_policy" "format_json_fms_data" { + name = "format-json-fms-data" + description = "Policy for Lambda to virus scan and move files" + policy = data.aws_iam_policy_document.format_json_fms_data_policy_document.json +} + +resource "aws_iam_role_policy_attachment" "format_json_fms_data_policy_attachment" { + role = aws_iam_role.format_json_fms_data.name + policy_arn = aws_iam_policy.format_json_fms_data.arn +} diff --git a/terraform/environments/electronic-monitoring-data/lambdas_main.tf b/terraform/environments/electronic-monitoring-data/lambdas_main.tf index 35b62e0b881..df247af5d39 100644 --- a/terraform/environments/electronic-monitoring-data/lambdas_main.tf +++ b/terraform/environments/electronic-monitoring-data/lambdas_main.tf @@ -254,25 +254,6 @@ module "rotate_iam_key" { production_dev = local.is-production ? "prod" : "dev" } -#----------------------------------------------------------------------------------- -# Process landing bucket files -#----------------------------------------------------------------------------------- - -module "process_landing_bucket_files" { - source = "./modules/lambdas" - function_name = "process_landing_bucket_files" - is_image = true - role_name = aws_iam_role.process_landing_bucket_files.name - role_arn = aws_iam_role.process_landing_bucket_files.arn - memory_size = 1024 - timeout = 900 - core_shared_services_id = local.environment_management.account_ids["core-shared-services-production"] - production_dev = local.is-production ? 
"prod" : "dev" - environment_variables = { - DESTINATION_BUCKET = module.s3-received-files-bucket.bucket.id - } -} - #----------------------------------------------------------------------------------- # Virus scanning - definition upload #----------------------------------------------------------------------------------- @@ -326,3 +307,19 @@ module "virus_scan_file" { PROCESSED_BUCKET_NAME = module.s3-data-bucket.bucket.id } } + +#----------------------------------------------------------------------------------- +# Process json files +#----------------------------------------------------------------------------------- + +module "format_json_fms_data" { + source = "./modules/lambdas" + function_name = "format_json_fms_data" + is_image = true + role_name = aws_iam_role.format_json_fms_data.name + role_arn = aws_iam_role.format_json_fms_data.arn + memory_size = 1024 + timeout = 900 + core_shared_services_id = local.environment_management.account_ids["core-shared-services-production"] + production_dev = local.is-production ? "prod" : "dev" +} diff --git a/terraform/environments/electronic-monitoring-data/locals.tf b/terraform/environments/electronic-monitoring-data/locals.tf index 9b1db0bcd98..1abe4af6229 100644 --- a/terraform/environments/electronic-monitoring-data/locals.tf +++ b/terraform/environments/electronic-monitoring-data/locals.tf @@ -116,9 +116,15 @@ locals { #---------------------------------------------------------------------------- g4s_ssh_keys = [ "ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBK85G9UwgU1KKgsYXfTWDsT4MqGSmjku1XGpH1EqmSuXLk5lmwFsgoLqqsROq2oEw2Yrr3uLyNVY2Dl6Pfm+dkdljfbPtqku+AkRSkhDo4K7bIwhWPh7HImcalxhde6BUA== ecdsa-key-20240208", + "ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBEXJdSFcodesKVvDRdJYySLZ7RSmkHDadklPTi1M4GId09+9hD9VoCbLWJsDbbDtXEkts63oNOIBcF8w1KfkC1O0N7VPumJ6VkklXNBrhDPJu3JvENZW/bX2JDPC+/gYdg== ecdsa-key-20241125", + "ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBJ11LVR2KRfiTbziv7Xkr7RfDCI502InqqBlAKxDiQQgEeGkRJQNI11e/uSQTZCgaj/F4AXadBvaJ0buH478q1+FBZ8pl7EkZlxeRky3vBu0hPFNN6+9D8Q//uGpEKSu+w== ecdsa-key-20241125", + "ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBGa8I/XEQt/HkWvjEXip9Ob0xgsUb47dyAoJ3htuc/pp0oxf2xpYk1YkdzQt8jo8b6effc2e5mf6MdEdMo6t/ck9TnER5IOs/BeurNTnlzq2JW6RDLBmhrB5yyfcYf9nyA== ecdsa-key-20241125", ] g4s_cidr_ipv4s = [ "18.135.195.129/32", + "18.130.124.178/32", + "18.171.111.175/32", + "35.178.248.3/32", ] g4s_cidr_ipv6s = [] diff --git a/terraform/environments/electronic-monitoring-data/modules/ap_airflow_iam_role/main.tf b/terraform/environments/electronic-monitoring-data/modules/ap_airflow_iam_role/main.tf index 2d50d779061..ebf0aefebe9 100644 --- a/terraform/environments/electronic-monitoring-data/modules/ap_airflow_iam_role/main.tf +++ b/terraform/environments/electronic-monitoring-data/modules/ap_airflow_iam_role/main.tf @@ -42,6 +42,7 @@ resource "aws_iam_role" "role_ap_airflow" { description = var.role_description assume_role_policy = data.aws_iam_policy_document.oidc_assume_role_policy.json force_detach_policies = true + max_session_duration = var.max_session_duration } resource "aws_iam_policy" "role_ap_airflow" { diff --git a/terraform/environments/electronic-monitoring-data/modules/ap_airflow_iam_role/variables.tf b/terraform/environments/electronic-monitoring-data/modules/ap_airflow_iam_role/variables.tf index 448df15b95b..51a8f8b9b24 100644 --- 
a/terraform/environments/electronic-monitoring-data/modules/ap_airflow_iam_role/variables.tf +++ b/terraform/environments/electronic-monitoring-data/modules/ap_airflow_iam_role/variables.tf @@ -27,3 +27,10 @@ variable "oidc_arn" { type = string nullable = false } + +variable "max_session_duration" { + type = number + description = "max session duration for the role in seconds" + nullable = true + default = 7200 +} diff --git a/terraform/environments/electronic-monitoring-data/modules/ap_airflow_load_data_iam_role/main.tf b/terraform/environments/electronic-monitoring-data/modules/ap_airflow_load_data_iam_role/main.tf index d4fc62fbaa8..795835685ea 100644 --- a/terraform/environments/electronic-monitoring-data/modules/ap_airflow_load_data_iam_role/main.tf +++ b/terraform/environments/electronic-monitoring-data/modules/ap_airflow_load_data_iam_role/main.tf @@ -22,6 +22,7 @@ data "aws_iam_policy_document" "load_data" { resources = [ "${var.source_data_bucket.arn}${var.path_to_data}/*", "${var.source_data_bucket.arn}/staging${var.path_to_data}/*", + "${var.cadt_bucket.arn}/staging/${local.snake-database}/*", "${var.cadt_bucket.arn}/staging${var.path_to_data}/*", "${var.athena_dump_bucket.arn}/output/*" ] @@ -50,7 +51,7 @@ data "aws_iam_policy_document" "load_data" { ] } statement { - sid = "GluePermissionsForLoadAtriumUnstructured${local.camel-sid}" + sid = "GluePermissionsForLoad${local.camel-sid}" effect = "Allow" actions = [ "glue:GetTable", @@ -87,10 +88,11 @@ data "aws_iam_policy_document" "load_data" { module "load_unstructured_atrium_database" { source = "../ap_airflow_iam_role" - environment = var.environment - role_name_suffix = "load-${var.name}" - role_description = "${var.name} database permissions" - iam_policy_document = data.aws_iam_policy_document.load_data.json - secret_code = var.secret_code - oidc_arn = var.oidc_arn + environment = var.environment + role_name_suffix = "load-${var.name}" + role_description = "${var.name} database permissions" + iam_policy_document = data.aws_iam_policy_document.load_data.json + secret_code = var.secret_code + oidc_arn = var.oidc_arn + max_session_duration = var.max_session_duration } diff --git a/terraform/environments/electronic-monitoring-data/modules/ap_airflow_load_data_iam_role/variables.tf b/terraform/environments/electronic-monitoring-data/modules/ap_airflow_load_data_iam_role/variables.tf index 9ed20efb3c4..1e3fedb4918 100644 --- a/terraform/environments/electronic-monitoring-data/modules/ap_airflow_load_data_iam_role/variables.tf +++ b/terraform/environments/electronic-monitoring-data/modules/ap_airflow_load_data_iam_role/variables.tf @@ -42,4 +42,11 @@ variable "oidc_arn" { variable "cadt_bucket" { type = object({ arn = string }) description = "bucket for cadt" -} \ No newline at end of file +} + +variable "max_session_duration" { + type = number + description = "max session duration for the role in seconds" + nullable = true + default = 7200 +} diff --git a/terraform/environments/electronic-monitoring-data/modules/dms/RDS_MSSQLServer_DB_Scripts/D_Comments_V2_DDL.sql b/terraform/environments/electronic-monitoring-data/modules/dms/RDS_MSSQLServer_DB_Scripts/D_Comments_V2_DDL.sql deleted file mode 100644 index 4919f587c5a..00000000000 --- a/terraform/environments/electronic-monitoring-data/modules/dms/RDS_MSSQLServer_DB_Scripts/D_Comments_V2_DDL.sql +++ /dev/null @@ -1,59 +0,0 @@ -SET ANSI_NULLS ON -GO -SET QUOTED_IDENTIFIER ON -GO -CREATE TABLE [dbo].[D_Comments_V2]( - [CommentSID] [int] IDENTITY(1,1) NOT NULL, - [VisitID] [int] 
NULL, - [ActivityID] [uniqueidentifier] NULL, - [Comments] [varchar](4200) NULL, - [CommentType] [varchar](50) NULL -) ON [PRIMARY] -GO -CREATE CLUSTERED INDEX [PK_D_Comments_V2] ON [dbo].[D_Comments_V2] -( - [CommentSID] ASC -)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, SORT_IN_TEMPDB = OFF, DROP_EXISTING = OFF, ONLINE = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY] -GO -SET ANSI_PADDING ON -GO -CREATE NONCLUSTERED INDEX [I1_D_Comments_V2] ON [dbo].[D_Comments_V2] -( - [VisitID] ASC, - [CommentType] ASC -) -INCLUDE([CommentSID]) WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, SORT_IN_TEMPDB = OFF, DROP_EXISTING = OFF, ONLINE = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY] -GO -SET ANSI_PADDING ON -GO -CREATE NONCLUSTERED INDEX [I2_D_Comments_V2] ON [dbo].[D_Comments_V2] -( - [CommentType] ASC -) -INCLUDE([CommentSID],[ActivityID]) WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, SORT_IN_TEMPDB = OFF, DROP_EXISTING = OFF, ONLINE = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY] -GO -CREATE NONCLUSTERED INDEX [I3_D_Comments_V2] ON [dbo].[D_Comments_V2] -( - [VisitID] ASC, - [ActivityID] ASC -)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, SORT_IN_TEMPDB = OFF, DROP_EXISTING = OFF, ONLINE = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY] -GO -SET ANSI_PADDING ON -GO -CREATE NONCLUSTERED INDEX [I4_D_Comments_V2] ON [dbo].[D_Comments_V2] -( - [ActivityID] ASC, - [CommentType] ASC -) -INCLUDE([CommentSID]) WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, SORT_IN_TEMPDB = OFF, DROP_EXISTING = OFF, ONLINE = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY] -GO -EXEC sys.sp_addextendedproperty @name=N'MS_Description', @value=N'Database ID' , @level0type=N'SCHEMA',@level0name=N'dbo', @level1type=N'TABLE',@level1name=N'D_Comments_V2', @level2type=N'COLUMN',@level2name=N'CommentSID' -GO -EXEC sys.sp_addextendedproperty @name=N'MS_Description', @value=N'Database ID' , @level0type=N'SCHEMA',@level0name=N'dbo', @level1type=N'TABLE',@level1name=N'D_Comments_V2', @level2type=N'COLUMN',@level2name=N'VisitID' -GO -EXEC sys.sp_addextendedproperty @name=N'MS_Description', @value=N'Database ID' , @level0type=N'SCHEMA',@level0name=N'dbo', @level1type=N'TABLE',@level1name=N'D_Comments_V2', @level2type=N'COLUMN',@level2name=N'ActivityID' -GO -EXEC sys.sp_addextendedproperty @name=N'MS_Description', @value=N'Free text comments summarising either a phone call or a visit adding detail that isnt captured else where ' , @level0type=N'SCHEMA',@level0name=N'dbo', @level1type=N'TABLE',@level1name=N'D_Comments_V2', @level2type=N'COLUMN',@level2name=N'Comments' -GO -EXEC sys.sp_addextendedproperty @name=N'MS_Description', @value=N'identifies if the comment belongs to either a phone call or a visit' , @level0type=N'SCHEMA',@level0name=N'dbo', @level1type=N'TABLE',@level1name=N'D_Comments_V2', @level2type=N'COLUMN',@level2name=N'CommentType' -GO diff --git a/terraform/environments/electronic-monitoring-data/modules/dms/RDS_MSSQLServer_DB_Scripts/D_Comments_V2_Insert_Into_Select.sql b/terraform/environments/electronic-monitoring-data/modules/dms/RDS_MSSQLServer_DB_Scripts/D_Comments_V2_Insert_Into_Select.sql deleted file mode 100644 index 9d7179f2078..00000000000 --- a/terraform/environments/electronic-monitoring-data/modules/dms/RDS_MSSQLServer_DB_Scripts/D_Comments_V2_Insert_Into_Select.sql +++ /dev/null @@ -1,31 +0,0 @@ -SET IDENTITY_INSERT g4s_cap_dw.dbo.D_Comments_V2 ON -; - -truncate table 
[g4s_cap_dw].[dbo].[D_Comments_V2]; - -INSERT INTO g4s_cap_dw.dbo.D_Comments_V2 (CommentSID, VisitID, ActivityID, Comments, CommentType) - SELECT CommentSID, VisitID, ActivityID, - trim(replace(replace(Comments, char(141), ''), char(129), '')) AS Comments, - CommentType - FROM g4s_cap_dw.dbo.D_Comments -; - -SET IDENTITY_INSERT g4s_cap_dw.dbo.D_Comments_V2 OFF; - - --- TESTING QUERIES -- --- --- SELECT COUNT(*) FROM g4s_cap_dw.dbo.D_Comments_V2; -- 49695569 --- SELECT COUNT(*) FROM g4s_cap_dw.dbo.D_Comments; -- 49695569 --- - - --- --- SELECT 'D_Comments' AS TableName, Comments --- FROM g4s_cap_dw.dbo.D_Comments --- WHERE CommentSID = 26837791 --- UNION --- SELECT 'D_Comments_V2' AS TableName, Comments --- FROM g4s_cap_dw.dbo.D_Comments_V2 --- WHERE CommentSID = 26837791 --- \ No newline at end of file diff --git a/terraform/environments/electronic-monitoring-data/modules/landing_bucket/main.tf b/terraform/environments/electronic-monitoring-data/modules/landing_bucket/main.tf index 3c5c67d8f07..0c08d9bbd05 100644 --- a/terraform/environments/electronic-monitoring-data/modules/landing_bucket/main.tf +++ b/terraform/environments/electronic-monitoring-data/modules/landing_bucket/main.tf @@ -61,7 +61,7 @@ module "this-bucket" { effect = "Allow" actions = [ "s3:PutObject", - "s3:PutObjectAcl" + "s3:PutObjectAcl", ] principals = { identifiers = ["arn:aws:iam::${var.cross_account_access_role.account_number}:role/${var.cross_account_access_role.role_name}"] @@ -77,10 +77,54 @@ module "this-bucket" { ) } +#----------------------------------------------------------------------------------- +# KMS - customer managed key for use with cross account data +#----------------------------------------------------------------------------------- + +module "kms_key" { + #checkov:skip=CKV_TF_1:Module registry does not support commit hashes for versions + #checkov:skip=CKV_TF_2:Module registry does not support tags for versions + + source = "terraform-aws-modules/kms/aws" + version = "3.1.1" + + aliases = ["s3/landing_bucket_${var.data_feed}_${var.order_type}"] + description = "${var.data_feed} ${var.order_type} landing bucket KMS key" + + # Give full access to key for root account, and lambda role ability to use. + enable_default_policy = true + key_users = [aws_iam_role.process_landing_bucket_files.arn] + + deletion_window_in_days = 7 + + # Grant external account role specific operations. + # To view grants, need to use cli: + # aws kms list-grants --region=eu-west-2 --key-id + grants = var.cross_account_access_role != null ? 
{ + cross_account_access_role = { + grantee_principal = "arn:aws:iam::${var.cross_account_access_role.account_number}:role/${var.cross_account_access_role.role_name}" + operations = [ + "Encrypt", + "GenerateDataKey", + ] + } + } : {} + + tags = merge( + var.local_tags, + { order_type = var.order_type }, + { data_feed = var.data_feed } + ) +} + +#----------------------------------------------------------------------------------- +# Process landing bucket files - lambda triggers +#----------------------------------------------------------------------------------- + resource "aws_lambda_permission" "allow_bucket" { statement_id = "AllowExecutionFromS3Bucket-${var.data_feed}-${var.order_type}" action = "lambda:InvokeFunction" - function_name = var.s3_trigger_lambda_arn + function_name = module.process_landing_bucket_files.lambda_function_arn principal = "s3.amazonaws.com" source_arn = module.this-bucket.bucket.arn } @@ -89,9 +133,99 @@ resource "aws_s3_bucket_notification" "bucket_notification" { bucket = module.this-bucket.bucket.id lambda_function { - lambda_function_arn = var.s3_trigger_lambda_arn + lambda_function_arn = module.process_landing_bucket_files.lambda_function_arn events = ["s3:ObjectCreated:*"] } depends_on = [aws_lambda_permission.allow_bucket] } + +#----------------------------------------------------------------------------------- +# Process landing bucket files - lambda +#----------------------------------------------------------------------------------- + +module "process_landing_bucket_files" { + source = "../lambdas" + function_name = "process_landing_bucket_files_${var.data_feed}_${var.order_type}" + image_name = "process_landing_bucket_files" + is_image = true + role_name = aws_iam_role.process_landing_bucket_files.name + role_arn = aws_iam_role.process_landing_bucket_files.arn + memory_size = 1024 + timeout = 900 + core_shared_services_id = var.core_shared_services_id + production_dev = var.production_dev + environment_variables = { + DESTINATION_BUCKET = var.received_files_bucket_id + } +} + +#----------------------------------------------------------------------------------- +# Process landing bucket files - lambda IAM role and policy +#----------------------------------------------------------------------------------- + +resource "aws_iam_role" "process_landing_bucket_files" { + name = "process_landing_bucket_files_${var.data_feed}_${var.order_type}" + assume_role_policy = data.aws_iam_policy_document.lambda_assume_role.json +} + +data "aws_iam_policy_document" "process_landing_bucket_files_s3_policy_document" { + statement { + sid = "S3PermissionsForLandingBuckets" + effect = "Allow" + actions = [ + "s3:PutObjectTagging", + "s3:GetObject", + "s3:GetObjectTagging", + "s3:DeleteObject" + ] + resources = [ + "${module.this-bucket.bucket.arn}/*", + ] + } + + statement { + sid = "S3PermissionsForReceivedFilesBucket" + effect = "Allow" + actions = [ + "s3:PutObject", + "s3:PutObjectTagging" + ] + resources = [ + "arn:aws:s3:::${var.received_files_bucket_id}/*", + ] + } + + statement { + sid = "KMSDecryptObjects" + effect = "Allow" + actions = [ + "kms:Decrypt", + ] + resources = [ + module.kms_key.key_arn, + ] + } +} + +resource "aws_iam_policy" "process_landing_bucket_files_s3" { + name = "process_landing_bucket_files_s3_policy_${var.data_feed}_${var.order_type}" + description = "Policy for Lambda to process files in ${var.data_feed} ${var.order_type} landing bucket" + policy = data.aws_iam_policy_document.process_landing_bucket_files_s3_policy_document.json +} + 
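+# Attach the S3/KMS policy above to the per-bucket process_landing_bucket_files Lambda role.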
+resource "aws_iam_role_policy_attachment" "process_landing_bucket_files_s3_policy_policy_attachment" { + role = aws_iam_role.process_landing_bucket_files.name + policy_arn = aws_iam_policy.process_landing_bucket_files_s3.arn +} + +data "aws_iam_policy_document" "lambda_assume_role" { + statement { + effect = "Allow" + principals { + type = "Service" + identifiers = ["lambda.amazonaws.com"] + } + actions = ["sts:AssumeRole"] + } +} diff --git a/terraform/environments/electronic-monitoring-data/modules/landing_bucket/variables.tf b/terraform/environments/electronic-monitoring-data/modules/landing_bucket/variables.tf index ac72a17b0d2..1be92a4e491 100644 --- a/terraform/environments/electronic-monitoring-data/modules/landing_bucket/variables.tf +++ b/terraform/environments/electronic-monitoring-data/modules/landing_bucket/variables.tf @@ -1,3 +1,8 @@ +variable "core_shared_services_id" { + description = "The core shared services id" + type = string +} + variable "cross_account_access_role" { description = "An object containing the cross account number and role name." type = object({ @@ -40,7 +45,12 @@ variable "order_type" { type = string } -variable "s3_trigger_lambda_arn" { - description = "The lambda arn used with s3 notification to be triggered on ObjectCreated*" +variable "production_dev" { + description = "The environment the lambda is being deployed to" + type = string +} + +variable "received_files_bucket_id" { + description = "The id of the bucket data will be moved to" type = string } diff --git a/terraform/environments/electronic-monitoring-data/modules/landing_zone/main.tf b/terraform/environments/electronic-monitoring-data/modules/landing_zone/main.tf index 02676922572..ecfb2e6b309 100644 --- a/terraform/environments/electronic-monitoring-data/modules/landing_zone/main.tf +++ b/terraform/environments/electronic-monitoring-data/modules/landing_zone/main.tf @@ -40,7 +40,7 @@ resource "random_string" "this" { #tfsec:ignore:aws-s3-enable-versioning module "landing-bucket" { - source = "github.com/ministryofjustice/modernisation-platform-terraform-s3-bucket?ref=52a40b0" + source = "github.com/ministryofjustice/modernisation-platform-terraform-s3-bucket?ref=f759060" bucket_name = "${var.supplier}-${random_string.this.result}" replication_enabled = false providers = { diff --git a/terraform/environments/electronic-monitoring-data/s3.tf b/terraform/environments/electronic-monitoring-data/s3.tf index 8de10c7fa24..5b7fb963bbb 100644 --- a/terraform/environments/electronic-monitoring-data/s3.tf +++ b/terraform/environments/electronic-monitoring-data/s3.tf @@ -2,16 +2,16 @@ locals { bucket_prefix = "emds-${local.environment_shorthand}" mdss_supplier_account_mapping = { - "production" = null - "preproduction" = { + "production" = null + "preproduction" = null + "test" = { "account_number" = 173142358744 - "role_name" = "juniper-dt-lambda-role" + "role_name" = "dev-datatransfer-lambda-role" } - "test" = { + "development" = { "account_number" = 173142358744 - role_name = "dev-dt-lambda-role" + "role_name" = "dev-datatransfer-lambda-role" } - "development" = null } } @@ -546,12 +546,15 @@ module "s3-data-bucket" { module "s3-fms-general-landing-bucket" { source = "./modules/landing_bucket/" - data_feed = "fms" - local_bucket_prefix = local.bucket_prefix - local_tags = local.tags - logging_bucket = module.s3-logging-bucket - order_type = "general" - s3_trigger_lambda_arn = module.process_landing_bucket_files.lambda_function_arn + data_feed = "fms" + order_type = "general" + + 
core_shared_services_id = local.environment_management.account_ids["core-shared-services-production"] + local_bucket_prefix = local.bucket_prefix + local_tags = local.tags + logging_bucket = module.s3-logging-bucket + production_dev = local.is-production ? "prod" : "dev" + received_files_bucket_id = module.s3-received-files-bucket.bucket.id providers = { aws = aws @@ -561,11 +564,12 @@ module "s3-fms-general-landing-bucket" { module "s3-fms-general-landing-bucket-iam-user" { source = "./modules/landing_bucket_iam_user_access/" - data_feed = "fms" + data_feed = "fms" + order_type = "general" + landing_bucket_arn = module.s3-fms-general-landing-bucket.bucket_arn local_bucket_prefix = local.bucket_prefix local_tags = local.tags - order_type = "general" rotation_lambda = module.rotate_iam_key rotation_lambda_role_name = aws_iam_role.rotate_iam_keys.name } @@ -573,12 +577,15 @@ module "s3-fms-general-landing-bucket-iam-user" { module "s3-fms-specials-landing-bucket" { source = "./modules/landing_bucket/" - data_feed = "fms" - local_bucket_prefix = local.bucket_prefix - local_tags = local.tags - logging_bucket = module.s3-logging-bucket - order_type = "specials" - s3_trigger_lambda_arn = module.process_landing_bucket_files.lambda_function_arn + data_feed = "fms" + order_type = "specials" + + core_shared_services_id = local.environment_management.account_ids["core-shared-services-production"] + local_bucket_prefix = local.bucket_prefix + local_tags = local.tags + logging_bucket = module.s3-logging-bucket + production_dev = local.is-production ? "prod" : "dev" + received_files_bucket_id = module.s3-received-files-bucket.bucket.id providers = { aws = aws @@ -588,11 +595,12 @@ module "s3-fms-specials-landing-bucket" { module "s3-fms-specials-landing-bucket-iam-user" { source = "./modules/landing_bucket_iam_user_access/" - data_feed = "fms" + data_feed = "fms" + order_type = "specials" + landing_bucket_arn = module.s3-fms-specials-landing-bucket.bucket_arn local_bucket_prefix = local.bucket_prefix local_tags = local.tags - order_type = "specials" rotation_lambda = module.rotate_iam_key rotation_lambda_role_name = aws_iam_role.rotate_iam_keys.name } @@ -607,11 +615,13 @@ module "s3-mdss-general-landing-bucket" { data_feed = "mdss" order_type = "general" + core_shared_services_id = local.environment_management.account_ids["core-shared-services-production"] cross_account_access_role = local.mdss_supplier_account_mapping[local.environment] local_bucket_prefix = local.bucket_prefix local_tags = local.tags logging_bucket = module.s3-logging-bucket - s3_trigger_lambda_arn = module.process_landing_bucket_files.lambda_function_arn + production_dev = local.is-production ? "prod" : "dev" + received_files_bucket_id = module.s3-received-files-bucket.bucket.id providers = { aws = aws @@ -624,11 +634,13 @@ module "s3-mdss-ho-landing-bucket" { data_feed = "mdss" order_type = "ho" + core_shared_services_id = local.environment_management.account_ids["core-shared-services-production"] cross_account_access_role = local.mdss_supplier_account_mapping[local.environment] local_bucket_prefix = local.bucket_prefix local_tags = local.tags logging_bucket = module.s3-logging-bucket - s3_trigger_lambda_arn = module.process_landing_bucket_files.lambda_function_arn + production_dev = local.is-production ? 
"prod" : "dev" + received_files_bucket_id = module.s3-received-files-bucket.bucket.id providers = { aws = aws @@ -641,11 +653,13 @@ module "s3-mdss-specials-landing-bucket" { data_feed = "mdss" order_type = "specials" + core_shared_services_id = local.environment_management.account_ids["core-shared-services-production"] cross_account_access_role = local.mdss_supplier_account_mapping[local.environment] local_bucket_prefix = local.bucket_prefix local_tags = local.tags logging_bucket = module.s3-logging-bucket - s3_trigger_lambda_arn = module.process_landing_bucket_files.lambda_function_arn + production_dev = local.is-production ? "prod" : "dev" + received_files_bucket_id = module.s3-received-files-bucket.bucket.id providers = { aws = aws @@ -675,7 +689,7 @@ module "s3-p1-export-bucket" { module "s3-serco-export-bucket" { source = "./modules/export_bucket_presigned_url/" - allowed_ips = null + allowed_ips = ["137.83.234.93/32", "130.41.187.248/32"] export_destination = "serco-historic" local_bucket_prefix = local.bucket_prefix local_tags = local.tags @@ -1243,3 +1257,80 @@ module "s3-create-a-derived-table-bucket" { tags = local.tags } + + +# ------------------------------------------------------------------------ +# Raw converted store bucket +# ------------------------------------------------------------------------ + +module "s3-raw-formatted-data-bucket" { + source = "github.com/ministryofjustice/modernisation-platform-terraform-s3-bucket?ref=f759060" + bucket_prefix = "${local.bucket_prefix}-raw-formatted-data-" + versioning_enabled = true + + # to disable ACLs in preference of BucketOwnership controls as per https://aws.amazon.com/blogs/aws/heads-up-amazon-s3-security-changes-are-coming-in-april-of-2023/ set: + ownership_controls = "BucketOwnerEnforced" + acl = "private" + + # Refer to the below section "Replication" before enabling replication + replication_enabled = false + # Below variable and providers configuration is only relevant if 'replication_enabled' is set to true + # replication_region = "eu-west-2" + providers = { + # Here we use the default provider Region for replication. Destination buckets can be within the same Region as the + # source bucket. On the other hand, if you need to enable cross-region replication, please contact the Modernisation + # Platform team to add a new provider for the additional Region. 
+ # Leave this provider block in even if you are not using replication + aws.bucket-replication = aws + } + log_buckets = tomap({ + "log_bucket_name" : module.s3-logging-bucket.bucket.id, + "log_bucket_arn" : module.s3-logging-bucket.bucket.arn, + "log_bucket_policy" : module.s3-logging-bucket.bucket_policy.policy, + }) + log_prefix = "logs/${local.bucket_prefix}-raw-formatted-data/" + log_partition_date_source = "EventTime" + + lifecycle_rule = [ + { + id = "main" + enabled = "Enabled" + prefix = "" + + tags = { + rule = "log" + autoclean = "true" + } + + transition = [ + { + days = 183 + storage_class = "STANDARD_IA" + }, { + days = 730 + storage_class = "GLACIER" + } + ] + + expiration = { + days = 10000 + } + + noncurrent_version_transition = [ + { + days = 30 + storage_class = "STANDARD_IA" + }, { + days = 90 + storage_class = "GLACIER" + } + ] + + noncurrent_version_expiration = { + days = 365 + } + } + ] + + tags = local.tags +} diff --git a/terraform/environments/electronic-monitoring-data/transfer_server_accounts.tf b/terraform/environments/electronic-monitoring-data/transfer_server_accounts.tf index b535f9af2ec..dd447261135 100644 --- a/terraform/environments/electronic-monitoring-data/transfer_server_accounts.tf +++ b/terraform/environments/electronic-monitoring-data/transfer_server_accounts.tf @@ -99,11 +99,11 @@ module "g4s" { # local.sftp_account_dev, # Test account for supplier. - # local.sftp_account_g4s_test, + local.sftp_account_g4s_test, # Accounts for each system to be migrated. # local.sftp_account_g4s_atrium, - local.sftp_account_g4s_atrium_unstructured, + # local.sftp_account_g4s_atrium_unstructured, # local.sftp_account_g4s_cap_dw, # local.sftp_account_g4s_integrity, # local.sftp_account_g4s_telephony, @@ -113,7 +113,7 @@ module "g4s" { # local.sftp_account_g4s_atv, # local.sftp_account_g4s_emsys_mvp, # local.sftp_account_g4s_emsys_tpims, - local.sftp_account_g4s_x_drive, + # local.sftp_account_g4s_x_drive, ] data_store_bucket = module.s3-data-bucket.bucket diff --git a/terraform/environments/example/s3_malware_protection.tf b/terraform/environments/example/s3_malware_protection.tf new file mode 100644 index 00000000000..28b75e542c2 --- /dev/null +++ b/terraform/environments/example/s3_malware_protection.tf @@ -0,0 +1,44 @@ +# Variable to allow bucket selection +variable "buckets_to_protect" { + description = < merge( + module.baseline_presets.cloudwatch_metric_alarms.ec2_instance_cwagent_collectd_endpoint_monitoring["endpoint-down"], + { dimensions = { type = "exitcode" - type_instance = "hmpps-az-gw1.justice.gov.uk" + type_instance = value[0] } - }) - - "endpoint-down-hmpps-domain-rdgateway" = merge(local.endpoint_down_alarm, { + alarm_actions = [value[2]] + ok_actions = [value[2]] + } + ) + } + cloudwatch_metric_alarms_endpoint_monitoring_cert_expiry = { + for key, value in local.endpoint_alarms[local.environment] : "endpoint-cert-expires-soon-${key}" => merge( + module.baseline_presets.cloudwatch_metric_alarms.ec2_instance_cwagent_collectd_endpoint_monitoring["endpoint-cert-expires-soon"], + { dimensions = { type = "exitcode" - type_instance = "rdgateway1.hmpps-domain.service.justice.gov.uk" + type_instance = value[0] } - }) - } + alarm_actions = [value[2]] + ok_actions = [value[2]] + } + ) if value[1] == true } - cloudwatch_metric_alarms_endpoint_monitoring = local.cloudwatch_metric_alarms_endpoint_status_environment_specific[local.environment] + cloudwatch_metric_alarms_endpoint_monitoring = merge( + local.cloudwatch_metric_alarms_endpoint_monitoring_endpoint, 
+ local.cloudwatch_metric_alarms_endpoint_monitoring_cert_expiry + ) } diff --git a/terraform/environments/hmpps-oem/locals_development.tf b/terraform/environments/hmpps-oem/locals_development.tf index 1ae75cf8cdf..dd5f66dae3f 100644 --- a/terraform/environments/hmpps-oem/locals_development.tf +++ b/terraform/environments/hmpps-oem/locals_development.tf @@ -2,11 +2,23 @@ locals { baseline_presets_development = { options = { + cloudwatch_dashboard_default_widget_groups = [ + "lb", + "ec2", + "ec2_linux", + "ec2_autoscaling_group_linux", + "ec2_instance_linux", + "ec2_instance_oracle_db_with_backup", + "ec2_windows", + "ssm_command", + ] + enable_ec2_delius_dba_secrets_access = true sns_topics = { pagerduty_integrations = { - pagerduty = "hmpps-oem-development" + dso-pipelines-pagerduty = "dso-pipelines" + pagerduty = "hmpps-oem-development" } } } @@ -15,6 +27,63 @@ locals { # please keep resources in alphabetical order baseline_development = { + "endpoints-and-pipelines" = { + account_name = "hmpps-oem-development" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + module.baseline_presets.cloudwatch_dashboard_widget_groups.ssm_command, + ] + } + "hmpps-oem-development" = { + account_name = null + periodOverride = "auto" + start = "-PT6H" + widget_groups = [{ + header_markdown = "## EC2 Oracle Enterprise Management" + width = 8 + height = 8 + add_ebs_widgets = { + iops = true + throughput = true + } + search_filter = { + ec2_tag = [ + { tag_name = "server-type", tag_value = "hmpps-oem" }, + ] + } + widgets = [ + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.cpu-utilization-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.instance-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.system-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.high-memory-usage, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.cpu-iowait-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_os.service-status-error-os-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_app.service-status-error-app-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_connected.oracle-db-disconnected, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_backup.oracle-db-rman-backup-error, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_backup.oracle-db-rman-backup-did-not-run, + ] + }] + } + "nomis-development" = { + account_name = "nomis-development" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_autoscaling_group_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_oracle_db_with_backup, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + 
module.baseline_presets.cloudwatch_dashboard_widget_groups.ssm_command, + ] + } + ec2_autoscaling_groups = { dev-base-ol85 = { autoscaling_group = { diff --git a/terraform/environments/hmpps-oem/locals_preproduction.tf b/terraform/environments/hmpps-oem/locals_preproduction.tf index c00a01ea34b..e1f15fe3747 100644 --- a/terraform/environments/hmpps-oem/locals_preproduction.tf +++ b/terraform/environments/hmpps-oem/locals_preproduction.tf @@ -2,11 +2,34 @@ locals { baseline_presets_preproduction = { options = { + cloudwatch_dashboard_default_widget_groups = [ + "ec2_instance_endpoint_monitoring", + "network_lb", + "lb", + "ec2", + "ec2_linux", + "ec2_autoscaling_group_linux", + "ec2_instance_linux", + "ec2_instance_oracle_db_with_backup", + "ec2_instance_textfile_monitoring", + "ec2_windows", + "ssm_command", + ] + enable_ec2_delius_dba_secrets_access = true sns_topics = { pagerduty_integrations = { - pagerduty = "hmpps-oem-preproduction" + azure-fixngo-pagerduty = "az-noms-production-1-alerts" + corporate-staff-rostering-pagerduty = "corporate-staff-rostering-preproduction" + dso-pipelines-pagerduty = "dso-pipelines" + hmpps-domain-services-pagerduty = "hmpps-domain-services-preproduction" + nomis-combined-reporting-pagerduty = "nomis-combined-reporting-preproduction" + nomis-pagerduty = "nomis-preproduction" + oasys-national-reporting-pagerduty = "oasys-national-reporting-preproduction" + oasys-pagerduty = "oasys-preproduction" + pagerduty = "hmpps-oem-preproduction" + planetfm-pagerduty = "planetfm-preproduction" } } } @@ -15,6 +38,222 @@ locals { # please keep resources in alphabetical order baseline_preproduction = { + cloudwatch_dashboards = { + "corporate-staff-rostering-preproduction" = { + account_name = "corporate-staff-rostering-preproduction" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-preproduction" + search_filter_dimension = { + name = "type_instance" + values = [ + "r1.pp.csr.service.justice.gov.uk", + "r2.pp.csr.service.justice.gov.uk", + "r3.pp.csr.service.justice.gov.uk", + "r4.pp.csr.service.justice.gov.uk", + "r5.pp.csr.service.justice.gov.uk", + "r6.pp.csr.service.justice.gov.uk", + "traina.csr.service.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.network_lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_oracle_db_with_backup, + ] + } + "endpoints-and-pipelines" = { + account_name = "hmpps-oem-preproduction" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ssm_command, + ] + } + "hmpps-domain-services-preproduction" = { + account_name = "hmpps-domain-services-preproduction" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-preproduction" + search_filter_dimension = { + name = "type_instance" + values = [ + "rdgateway1.preproduction.hmpps-domain.service.justice.gov.uk", + ] + } + }), + 
module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + ] + } + "hmpps-oem-preproduction" = { + account_name = null + periodOverride = "auto" + start = "-PT6H" + widget_groups = [{ + header_markdown = "## EC2 Oracle Enterprise Management" + width = 8 + height = 8 + add_ebs_widgets = { + iops = true + throughput = true + } + search_filter = { + ec2_tag = [ + { tag_name = "server-type", tag_value = "hmpps-oem" }, + ] + } + widgets = [ + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.cpu-utilization-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.instance-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.system-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.high-memory-usage, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.cpu-iowait-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_os.service-status-error-os-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_app.service-status-error-app-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_connected.oracle-db-disconnected, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_backup.oracle-db-rman-backup-error, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_backup.oracle-db-rman-backup-did-not-run, + ] + }] + } + "nomis-preproduction" = { + account_name = "nomis-preproduction" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-preproduction" + search_filter_dimension = { + name = "type_instance" + values = [ + "c-lsast.preproduction.nomis.service.justice.gov.uk", + "c.preproduction.nomis.service.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_autoscaling_group_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_oracle_db_with_backup, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_textfile_monitoring, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ssm_command, + ] + } + "nomis-combined-reporting-preproduction" = { + account_name = "nomis-combined-reporting-preproduction" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-preproduction" + search_filter_dimension = { + name = "type_instance" + values = [ + "reporting.pp-nomis.az.justice.gov.uk", + 
"preproduction.reporting.nomis.service.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_oracle_db_with_backup, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_filesystems, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + ] + } + "nomis-data-hub-preproduction" = { + account_name = "nomis-data-hub-preproduction" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_textfile_monitoring, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + ] + } + "oasys-preproduction" = { + account_name = "oasys-preproduction" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-preproduction" + search_filter_dimension = { + name = "type_instance" + values = [ + "pp.oasys.service.justice.gov.uk", + "pp-int.oasys.service.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_autoscaling_group_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_oracle_db_with_backup, + ] + } + "oasys-national-reporting-preproduction" = { + account_name = "oasys-national-reporting-preproduction" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-preproduction" + search_filter_dimension = { + name = "type_instance" + values = [ + "onr.pp-oasys.az.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + ] + } + "planetfm-preproduction" = { + account_name = "planetfm-preproduction" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-preproduction" + search_filter_dimension = { + name = "type_instance" + values = [ + "cafmtx.pp.planetfm.service.justice.gov.uk", + "cafmwebx.pp.planetfm.service.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.network_lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + ] + } + } + ec2_instances = { preprod-oem-a = 
merge(local.ec2_instances.oem, { diff --git a/terraform/environments/hmpps-oem/locals_production.tf b/terraform/environments/hmpps-oem/locals_production.tf index c235d90ddc5..5495d32d650 100644 --- a/terraform/environments/hmpps-oem/locals_production.tf +++ b/terraform/environments/hmpps-oem/locals_production.tf @@ -2,9 +2,32 @@ locals { baseline_presets_production = { options = { + cloudwatch_dashboard_default_widget_groups = [ + "ec2_instance_endpoint_monitoring", + "network_lb", + "lb", + "ec2", + "ec2_linux", + "ec2_autoscaling_group_linux", + "ec2_instance_linux", + "ec2_instance_oracle_db_with_backup", + "ec2_instance_textfile_monitoring", + "ec2_windows", + "ssm_command", + ] + sns_topics = { pagerduty_integrations = { - pagerduty = "hmpps-oem-production" + azure-fixngo-pagerduty = "az-noms-production-1-alerts" + corporate-staff-rostering-pagerduty = "corporate-staff-rostering-production" + dso-pipelines-pagerduty = "dso-pipelines" + hmpps-domain-services-pagerduty = "hmpps-domain-services-production" + nomis-combined-reporting-pagerduty = "nomis-combined-reporting-production" + nomis-pagerduty = "nomis-production" + oasys-national-reporting-pagerduty = "oasys-national-reporting-production" + oasys-pagerduty = "oasys-production" + pagerduty = "hmpps-oem-production" + planetfm-pagerduty = "planetfm-production" } } } @@ -13,6 +36,223 @@ locals { # please keep resources in alphabetical order baseline_production = { + cloudwatch_dashboards = { + "corporate-staff-rostering-production" = { + account_name = "corporate-staff-rostering-production" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-production" + search_filter_dimension = { + name = "type_instance" + values = [ + "r1.csr.service.justice.gov.uk", + "r2.csr.service.justice.gov.uk", + "r3.csr.service.justice.gov.uk", + "r4.csr.service.justice.gov.uk", + "r5.csr.service.justice.gov.uk", + "r6.csr.service.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.network_lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_oracle_db_with_backup, + ] + } + "endpoints-and-pipelines" = { + account_name = "hmpps-oem-production" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ssm_command, + ] + } + "hmpps-domain-services-production" = { + account_name = "hmpps-domain-services-production" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-production" + search_filter_dimension = { + name = "type_instance" + values = [ + "rdgateway1.hmpps-domain.service.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + ] + } + "hmpps-oem-production" = { + account_name = null + periodOverride = "auto" + start = "-PT6H" + widget_groups = [{ + 
header_markdown = "## EC2 Oracle Enterprise Management" + width = 8 + height = 8 + add_ebs_widgets = { + iops = true + throughput = true + } + search_filter = { + ec2_tag = [ + { tag_name = "server-type", tag_value = "hmpps-oem" }, + ] + } + widgets = [ + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.cpu-utilization-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.instance-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.system-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.high-memory-usage, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.cpu-iowait-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_os.service-status-error-os-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_app.service-status-error-app-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_connected.oracle-db-disconnected, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_backup.oracle-db-rman-backup-error, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_backup.oracle-db-rman-backup-did-not-run, + ] + }] + } + "nomis-production" = { + account_name = "nomis-production" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-production" + search_filter_dimension = { + name = "type_instance" + values = [ + "c.nomis.az.justice.gov.uk", + "c.nomis.service.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_autoscaling_group_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_oracle_db_with_backup, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_textfile_monitoring, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ssm_command, + ] + } + "nomis-combined-reporting-production" = { + account_name = "nomis-combined-reporting-production" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-production" + search_filter_dimension = { + name = "type_instance" + values = [ + "reporting.nomis.az.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_oracle_db_with_backup, + 
module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_filesystems, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + ] + } + "nomis-data-hub-production" = { + account_name = "nomis-data-hub-production" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_textfile_monitoring, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + ] + } + "oasys-production" = { + account_name = "oasys-production" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-production" + search_filter_dimension = { + name = "type_instance" + values = [ + "oasys.service.justice.gov.uk", + "int.oasys.service.justice.gov.uk", + "practice.int.oasys.service.justice.gov.uk", + "training.int.oasys.service.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_autoscaling_group_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_oracle_db_with_backup, + ] + } + "oasys-national-reporting-production" = { + account_name = "oasys-national-reporting-production" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-production" + search_filter_dimension = { + name = "type_instance" + values = [ + "onr.oasys.az.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + ] + } + "planetfm-production" = { + account_name = "planetfm-production" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-production" + search_filter_dimension = { + name = "type_instance" + values = [ + "cafmtx.planetfm.service.justice.gov.uk", + "cafmwebx2.planetfm.service.justice.gov.uk", + "cafmtrainweb.planetfm.service.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.network_lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + ] + } + } + ec2_instances = { prod-oem-a = merge(local.ec2_instances.oem, { cloudwatch_metric_alarms = merge( diff --git a/terraform/environments/hmpps-oem/locals_test.tf b/terraform/environments/hmpps-oem/locals_test.tf index ff162caeb4f..adc401ce5f8 100644 --- a/terraform/environments/hmpps-oem/locals_test.tf +++ b/terraform/environments/hmpps-oem/locals_test.tf @@ -2,15 +2,30 @@ locals { baseline_presets_test = { 
options = { - cloudwatch_dashboard_default_widget_groups = flatten([ - local.cloudwatch_dashboard_default_widget_groups, - "github_workflows", # metrics are only pushed into test account - ]) + cloudwatch_dashboard_default_widget_groups = [ + "ec2_instance_endpoint_monitoring", + "lb", + "ec2", + "ec2_linux", + "ec2_autoscaling_group_linux", + "ec2_instance_linux", + "ec2_instance_oracle_db_with_backup", + "ec2_instance_textfile_monitoring", + "ec2_windows", + "ssm_command", + "github_workflows", + ] + enable_ec2_delius_dba_secrets_access = true sns_topics = { pagerduty_integrations = { - pagerduty = "hmpps-oem-test" + azure-fixngo-pagerduty = "az-noms-dev-test-environments-alerts" + dso-pipelines-pagerduty = "dso-pipelines" + hmpps-domain-services-pagerduty = "hmpps-domain-services-test" + nomis-pagerduty = "nomis-test" + oasys-pagerduty = "oasys-test" + pagerduty = "hmpps-oem-test" } } } @@ -19,7 +34,161 @@ locals { # please keep resources in alphabetical order baseline_test = { - cloudwatch_metric_alarms = module.baseline_presets.cloudwatch_metric_alarms.github + cloudwatch_metric_alarms = merge( + module.baseline_presets.cloudwatch_metric_alarms_by_sns_topic["dso-pipelines-pagerduty"].github + ) + + cloudwatch_dashboards = { + "endpoints-and-pipelines" = { + account_name = "hmpps-oem-test" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ssm_command, + module.baseline_presets.cloudwatch_dashboard_widget_groups.github_workflows, + ] + } + "hmpps-domain-services-test" = { + account_name = "hmpps-domain-services-test" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-test" + search_filter_dimension = { + name = "type_instance" + values = [ + "rdgateway1.test.hmpps-domain.service.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + ] + } + "hmpps-oem-test" = { + account_name = null + periodOverride = "auto" + start = "-PT6H" + widget_groups = [{ + header_markdown = "## EC2 Oracle Enterprise Management" + width = 8 + height = 8 + add_ebs_widgets = { + iops = true + throughput = true + } + search_filter = { + ec2_tag = [ + { tag_name = "server-type", tag_value = "hmpps-oem" }, + ] + } + widgets = [ + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.cpu-utilization-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.instance-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.system-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.high-memory-usage, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.cpu-iowait-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_os.service-status-error-os-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_app.service-status-error-app-layer, + 
module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_connected.oracle-db-disconnected, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_backup.oracle-db-rman-backup-error, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_backup.oracle-db-rman-backup-did-not-run, + ] + }] + } + "nomis-test" = { + account_name = "nomis-test" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-test" + search_filter_dimension = { + name = "type_instance" + values = [ + "c-t1.test.nomis.service.justice.gov.uk", + "c-t2.test.nomis.service.justice.gov.uk", + "c-t3.test.nomis.service.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_autoscaling_group_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_oracle_db_with_backup, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_textfile_monitoring, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ssm_command, + ] + } + "nomis-combined-reporting-test" = { + account_name = "nomis-combined-reporting-test" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_oracle_db_with_backup, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_filesystems, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + ] + } + "nomis-data-hub-test" = { + account_name = "nomis-data-hub-test" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_textfile_monitoring, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + ] + } + "oasys-test" = { + account_name = "oasys-test" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + merge(module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_endpoint_monitoring, { + account_name = "hmpps-oem-test" + search_filter_dimension = { + name = "type_instance" + values = [ + "t1-int.oasys.service.justice.gov.uk", + "t2-int.oasys.service.justice.gov.uk", + ] + } + }), + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_autoscaling_group_linux, + 
module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_oracle_db_with_backup, + ] + } + "oasys-national-reporting-test" = { + account_name = "oasys-national-reporting-test" + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_instance_linux, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ec2_windows, + ] + } + } ec2_autoscaling_groups = { test-oem = merge(local.ec2_instances.oem, { diff --git a/terraform/environments/hmpps-oem/main.tf b/terraform/environments/hmpps-oem/main.tf index a23f7e6d41b..d234a1e3a07 100644 --- a/terraform/environments/hmpps-oem/main.tf +++ b/terraform/environments/hmpps-oem/main.tf @@ -74,6 +74,7 @@ module "baseline" { ) cloudwatch_metric_alarms = merge( + module.baseline_presets.cloudwatch_metric_alarms_baseline, lookup(local.baseline_all_environments, "cloudwatch_metric_alarms", {}), lookup(local.baseline_environment_specific, "cloudwatch_metric_alarms", {}), ) @@ -177,6 +178,11 @@ module "baseline" { lookup(local.baseline_environment_specific, "s3_buckets", {}), ) + schedule_alarms_lambda = merge( + lookup(local.baseline_all_environments, "schedule_alarms_lambda", {}), + lookup(local.baseline_environment_specific, "schedule_alarms_lambda", {}), + ) + secretsmanager_secrets = merge( module.baseline_presets.secretsmanager_secrets, lookup(local.baseline_all_environments, "secretsmanager_secrets", {}), diff --git a/terraform/environments/long-term-storage/call-centre-migration.tf b/terraform/environments/long-term-storage/call-centre-migration.tf new file mode 100644 index 00000000000..ee503a4b930 --- /dev/null +++ b/terraform/environments/long-term-storage/call-centre-migration.tf @@ -0,0 +1,101 @@ +resource "aws_cloudwatch_log_group" "call_centre" { + name_prefix = "call-centre-migration" + retention_in_days = 365 + tags = local.tags +} + +resource "aws_kms_key" "call_centre" { + enable_key_rotation = true + rotation_period_in_days = 90 + tags = local.tags +} + +resource "aws_kms_key_policy" "call_centre" { + key_id = aws_kms_key.call_centre.id + policy = data.aws_iam_policy_document.call_centre_kms_policy.json +} + +resource "aws_s3_bucket" "call_centre" { + bucket_prefix = "call-centre-migration" + tags = local.tags +} + +resource "aws_s3_bucket_policy" "call_centre" { + bucket = aws_s3_bucket.call_centre.id + policy = data.aws_iam_policy_document.call_centre_bucket_policy.json +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "call_centre" { + bucket = aws_s3_bucket.call_centre.id + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.call_centre.arn + sse_algorithm = "aws:kms" + } + } +} + +resource "aws_secretsmanager_secret" "call_centre" { + description = "Secret containing key-value pairs for AWS Transfer connector." 
+ force_overwrite_replica_secret = true + name = "aws/transfer/${aws_transfer_server.call_centre.id}/call-centre" + recovery_window_in_days = 0 + tags = local.tags +} + +# Because we populate the secret version manually, we refer to its contents with a data call +data "aws_secretsmanager_secret_version" "call_centre" { + secret_id = aws_secretsmanager_secret.call_centre.id +} + +resource "aws_transfer_server" "call_centre" { + logging_role = aws_iam_role.call_centre_transfer_logging.arn + structured_log_destinations = ["${aws_cloudwatch_log_group.call_centre.arn}:*"] + tags = merge( + local.tags, + { Name = "call-centre-migration" } + ) +} + +resource "aws_transfer_connector" "call_centre" { + access_role = aws_iam_role.call_centre_transfer_access.arn + logging_role = aws_iam_role.call_centre_transfer_logging.arn + sftp_config { + trusted_host_keys = [jsondecode(data.aws_secretsmanager_secret_version.call_centre.secret_string)["Fingerprint"]] + user_secret_id = aws_secretsmanager_secret.call_centre.id + } + url = jsondecode(data.aws_secretsmanager_secret_version.call_centre.secret_string)["Hostname"] + tags = merge( + local.tags, + { Name = "call-centre-migration-connector" } + ) +} + +resource "aws_iam_role" "call_centre_transfer_logging" { + name_prefix = "call-centre-migration-logging" + assume_role_policy = data.aws_iam_policy_document.aws_transfer_assume_role_policy.json + tags = local.tags +} + +resource "aws_iam_role" "call_centre_transfer_access" { + name_prefix = "call-centre-access" + assume_role_policy = data.aws_iam_policy_document.aws_transfer_assume_role_policy.json + tags = local.tags +} + +resource "aws_iam_policy" "call_centre_transfer_access" { + description = "Access policy for AWS Transfer connector." + name_prefix = "call-centre-access" + policy = data.aws_iam_policy_document.call_centre_access_policy.json + tags = local.tags +} + +resource "aws_iam_role_policy_attachment" "call_centre_transfer_access" { + policy_arn = aws_iam_policy.call_centre_transfer_access.arn + role = aws_iam_role.call_centre_transfer_access.name +} + +resource "aws_iam_role_policy_attachments_exclusive" "call_centre_transfer_logging" { + policy_arns = ["arn:aws:iam::aws:policy/service-role/AWSTransferLoggingAccess"] + role_name = aws_iam_role.call_centre_transfer_logging.name +} \ No newline at end of file diff --git a/terraform/environments/long-term-storage/data.tf b/terraform/environments/long-term-storage/data.tf index 96a2521d17e..aa18fc802af 100644 --- a/terraform/environments/long-term-storage/data.tf +++ b/terraform/environments/long-term-storage/data.tf @@ -1 +1,130 @@ #### This file can be used to store data specific to the member account #### +data "aws_iam_policy_document" "aws_transfer_assume_role_policy" { + statement { + effect = "Allow" + + principals { + type = "Service" + identifiers = ["transfer.amazonaws.com"] + } + actions = ["sts:AssumeRole"] + } +} + +data "aws_iam_policy_document" "call_centre_access_policy" { + statement { + actions = [ + "s3:ListBucket", + "s3:GetBucketLocation" + ] + effect = "Allow" + resources = [aws_s3_bucket.call_centre.arn] + sid = "AllowListingOfBucket" + } + statement { + actions = [ + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:GetObjectVersion", + "s3:GetObjectACL", + "s3:PutObjectACL" + ] + effect = "Allow" + resources = ["${aws_s3_bucket.call_centre.arn}/*"] + sid = "AllowAccessToBucketObjects" + } + statement { + actions = ["kms:*"] + effect = "Allow" + resources = [aws_kms_key.call_centre.arn] + 
sid = "AllowAccessToKMSKey" + } + statement { + actions = [ + "secretsmanager:BatchGet*", + "secretsmanager:Describe*", + "secretsmanager:Get*", + "secretsmanager:List*" + ] + effect = "Allow" + resources = [aws_secretsmanager_secret.call_centre.arn] + sid = "AllowAccessToSecrets" + } +} + +data "aws_iam_policy_document" "call_centre_bucket_policy" { + statement { + actions = [ + "s3:ListBucket", + "s3:GetBucketLocation" + ] + effect = "Allow" + principals { + type = "Service" + identifiers = ["transfer.amazonaws.com"] + } + resources = [aws_s3_bucket.call_centre.arn] + sid = "AllowListingOfBucket" + } + statement { + actions = [ + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:GetObjectVersion", + "s3:GetObjectACL", + "s3:PutObjectACL" + ] + effect = "Allow" + principals { + type = "Service" + identifiers = ["transfer.amazonaws.com"] + } + resources = ["${aws_s3_bucket.call_centre.arn}/*"] + sid = "AllowAccessToBucketObjects" + } +} + +data "aws_iam_policy_document" "call_centre_kms_policy" { + statement { + sid = "KeyAdministration" + effect = "Allow" + + principals { + type = "AWS" + identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] + } + + actions = ["kms:*"] + resources = [aws_kms_key.call_centre.arn] + } + statement { + sid = "AllowAWSServiceAccess" + effect = "Allow" + principals { + type = "Service" + identifiers = ["transfer.amazonaws.com", "s3.amazonaws.com"] + } + actions = [ + "kms:Decrypt", + "kms:DescribeKey", + "kms:Encrypt", + "kms:GenerateDataKey", + "kms:ReEncrypt*" + ] + condition { + test = "StringEquals" + values = [data.aws_caller_identity.current.account_id] + variable = "kms:CallerAccount" + } + condition { + test = "StringLike" + values = ["transfer.amazonaws.com", "s3.amazonaws.com"] + variable = "kms:ViaService" + } + resources = [aws_kms_key.call_centre.arn] + } +} \ No newline at end of file diff --git a/terraform/environments/nomis-combined-reporting/locals.tf b/terraform/environments/nomis-combined-reporting/locals.tf index 68b4c09eeca..3acc84b9578 100644 --- a/terraform/environments/nomis-combined-reporting/locals.tf +++ b/terraform/environments/nomis-combined-reporting/locals.tf @@ -27,6 +27,7 @@ locals { "ec2_instance_linux", "ec2_instance_oracle_db_with_backup", "ec2_windows", + "ssm_command", ] cloudwatch_metric_alarms_default_actions = ["pagerduty"] cloudwatch_metric_oam_links_ssm_parameters = ["hmpps-oem-${local.environment}"] @@ -44,6 +45,7 @@ locals { enable_s3_bucket = true enable_s3_db_backup_bucket = true enable_s3_software_bucket = true + enable_ssm_command_monitoring = true s3_iam_policies = ["EC2S3BucketWriteAndDeleteAccessPolicy"] } } diff --git a/terraform/environments/nomis-combined-reporting/locals_ec2_instances.tf b/terraform/environments/nomis-combined-reporting/locals_ec2_instances.tf index 06f4941b7b0..9756c1f7ade 100644 --- a/terraform/environments/nomis-combined-reporting/locals_ec2_instances.tf +++ b/terraform/environments/nomis-combined-reporting/locals_ec2_instances.tf @@ -331,7 +331,7 @@ locals { description = "Windows Server 2012 R2 client testing for NART" instance-access-policy = "full" os-type = "Windows" - server-type = "OnrClient" + server-type = "NcrClient" update-ssm-agent = "patchgroup1" } } diff --git a/terraform/environments/nomis-combined-reporting/locals_efs.tf b/terraform/environments/nomis-combined-reporting/locals_efs.tf index 5d86653b1d1..5d1a7aa8e3d 100644 --- a/terraform/environments/nomis-combined-reporting/locals_efs.tf +++ 
b/terraform/environments/nomis-combined-reporting/locals_efs.tf @@ -30,7 +30,8 @@ locals { security_groups = ["bip"] }] tags = { - backup = "false" + backup = "false" + backup-plan = "daily-and-weekly" } } } diff --git a/terraform/environments/nomis-combined-reporting/locals_secretsmanager.tf b/terraform/environments/nomis-combined-reporting/locals_secretsmanager.tf index bd30bf49807..722662cdb32 100644 --- a/terraform/environments/nomis-combined-reporting/locals_secretsmanager.tf +++ b/terraform/environments/nomis-combined-reporting/locals_secretsmanager.tf @@ -3,26 +3,34 @@ locals { secretsmanager_secrets = { bip = { secrets = { - passwords = { description = "BIP Passwords" } - config = { description = "BIP Configuration" } - } - } - bip_app = { - secrets = { - passwords = { description = "BIP Passwords" } - config = { description = "BIP Configuration" } - } - } - bip_web = { - secrets = { - passwords = { description = "Web Passwords" } - config = { description = "Web Configuration" } + passwords = { + description = "BIP Passwords" + tags = { + instance-access-policy = "full" + } + } + config = { + description = "BIP Configuration" + tags = { + instance-access-policy = "limited" + } + } } } bods = { secrets = { - passwords = { description = "BODS Passwords" } - config = { description = "BODS Configuration" } + passwords = { + description = "BODS Passwords" + tags = { + instance-access-policy = "full" + } + } + config = { + description = "BODS Configuration" + tags = { + instance-access-policy = "limited" + } + } } } db = { diff --git a/terraform/environments/nomis-combined-reporting/locals_test.tf b/terraform/environments/nomis-combined-reporting/locals_test.tf index 32360424768..8262f32ef38 100644 --- a/terraform/environments/nomis-combined-reporting/locals_test.tf +++ b/terraform/environments/nomis-combined-reporting/locals_test.tf @@ -13,7 +13,39 @@ locals { # please keep resources in alphabetical order baseline_test = { + acm_certificates = { + nomis_combined_reporting_wildcard_cert = { + cloudwatch_metric_alarms = module.baseline_presets.cloudwatch_metric_alarms.acm + domain_name = "modernisation-platform.service.justice.gov.uk" + subject_alternate_names = [ + "test.reporting.nomis.service.justice.gov.uk", + "*.test.reporting.nomis.service.justice.gov.uk", + ] + tags = { + description = "Wildcard certificate for the test environment" + } + } + } + ec2_instances = { + t1-ncr-cms-1 = merge(local.ec2_instances.bip_cms, { + config = merge(local.ec2_instances.bip_cms.config, { + availability_zone = "eu-west-2a" + instance_profile_policies = concat(local.ec2_instances.bip_cms.config.instance_profile_policies, [ + "Ec2T1ReportingPolicy", + ]) + }) + user_data_cloud_init = merge(local.ec2_instances.bip_cms.user_data_cloud_init, { + args = merge(local.ec2_instances.bip_cms.user_data_cloud_init.args, { + branch = "main" + }) + }) + tags = merge(local.ec2_instances.bip_cms.tags, { + instance-scheduling = "skip-scheduling" + nomis-combined-reporting-environment = "t1" + }) + }) + t1-ncr-db-1-a = merge(local.ec2_instances.db, { cloudwatch_metric_alarms = merge( local.cloudwatch_metric_alarms.db, @@ -41,10 +73,35 @@ locals { tags = merge(local.ec2_instances.db.tags, { description = "T1 NCR DATABASE" nomis-combined-reporting-environment = "t1" - oracle-sids = "T1BIPSYS T1BIPAUD" + oracle-sids = "T1BIPSYS T1BIPAUD T1BISYS T1BIAUD" instance-scheduling = "skip-scheduling" }) }) + + t1-ncr-web-1 = merge(local.ec2_instances.bip_web, { + config = merge(local.ec2_instances.bip_web.config, { + 
availability_zone = "eu-west-2a" + instance_profile_policies = concat(local.ec2_instances.bip_web.config.instance_profile_policies, [ + "Ec2T1ReportingPolicy", + ]) + }) + instance = merge(local.ec2_instances.bip_web.instance, { + instance_type = "r6i.large" + }) + user_data_cloud_init = merge(local.ec2_instances.bip_cms.user_data_cloud_init, { + args = merge(local.ec2_instances.bip_cms.user_data_cloud_init.args, { + branch = "main" + }) + }) + tags = merge(local.ec2_instances.bip_web.tags, { + instance-scheduling = "skip-scheduling" + nomis-combined-reporting-environment = "t1" + }) + }) + } + + efs = { + t1-ncr-sap-share = local.efs.sap_share } iam_policies = { @@ -90,14 +147,46 @@ locals { instance_target_groups = {} listeners = {} }) + + public = merge(local.lbs.public, { + instance_target_groups = { + t1-http-7777 = merge(local.lbs.public.instance_target_groups.http-7777, { + attachments = [ + { ec2_instance_name = "t1-ncr-web-1" }, + ] + }) + } + listeners = merge(local.lbs.public.listeners, { + https = merge(local.lbs.public.listeners.https, { + alarm_target_group_names = [] + rules = { + web = { + priority = 200 + actions = [{ + type = "forward" + target_group_name = "t1-http-7777" + }] + conditions = [{ + host_header = { + values = [ + "t1.test.reporting.nomis.service.justice.gov.uk", + ] + } + }] + } + } + }) + }) + }) } route53_zones = { "test.reporting.nomis.service.justice.gov.uk" = { records = [ { name = "db", type = "CNAME", ttl = "3600", records = ["t1-ncr-db-1-a.nomis-combined-reporting.hmpps-test.modernisation-platform.service.justice.gov.uk"] }, - { name = "web", type = "CNAME", ttl = "3600", records = ["t1-ncr-web-1-a.nomis-combined-reporting.hmpps-test.modernisation-platform.service.justice.gov.uk"] }, - { name = "etl", type = "CNAME", ttl = "3600", records = ["t1-ncr-etl-1-a.nomis-combined-reporting.hmpps-test.modernisation-platform.service.justice.gov.uk"] } + ] + lb_alias_records = [ + { name = "t1", type = "A", lbs_map_key = "public" }, ] } } @@ -119,6 +208,8 @@ locals { secretsmanager_secrets = { "/oracle/database/T1BIPSYS" = local.secretsmanager_secrets.db "/oracle/database/T1BIPAUD" = local.secretsmanager_secrets.db + "/oracle/database/T1BISYS" = local.secretsmanager_secrets.db + "/oracle/database/T1BIAUD" = local.secretsmanager_secrets.db "/sap/bip/t1" = local.secretsmanager_secrets.bip "/sap/bods/t1" = local.secretsmanager_secrets.bods } diff --git a/terraform/environments/nomis-combined-reporting/main.tf b/terraform/environments/nomis-combined-reporting/main.tf index a23f7e6d41b..d234a1e3a07 100644 --- a/terraform/environments/nomis-combined-reporting/main.tf +++ b/terraform/environments/nomis-combined-reporting/main.tf @@ -74,6 +74,7 @@ module "baseline" { ) cloudwatch_metric_alarms = merge( + module.baseline_presets.cloudwatch_metric_alarms_baseline, lookup(local.baseline_all_environments, "cloudwatch_metric_alarms", {}), lookup(local.baseline_environment_specific, "cloudwatch_metric_alarms", {}), ) @@ -177,6 +178,11 @@ module "baseline" { lookup(local.baseline_environment_specific, "s3_buckets", {}), ) + schedule_alarms_lambda = merge( + lookup(local.baseline_all_environments, "schedule_alarms_lambda", {}), + lookup(local.baseline_environment_specific, "schedule_alarms_lambda", {}), + ) + secretsmanager_secrets = merge( module.baseline_presets.secretsmanager_secrets, lookup(local.baseline_all_environments, "secretsmanager_secrets", {}), diff --git a/terraform/environments/nomis-data-hub/locals.tf b/terraform/environments/nomis-data-hub/locals.tf index 
794da1cd6bf..2ca908e66c0 100644 --- a/terraform/environments/nomis-data-hub/locals.tf +++ b/terraform/environments/nomis-data-hub/locals.tf @@ -26,6 +26,7 @@ locals { "ec2_instance_linux", "ec2_instance_textfile_monitoring", "ec2_windows", + "ssm_command", ] cloudwatch_metric_alarms_default_actions = ["pagerduty"] cloudwatch_metric_oam_links_ssm_parameters = ["hmpps-oem-${local.environment}"] @@ -42,6 +43,7 @@ locals { enable_image_builder = true enable_s3_bucket = true enable_s3_software_bucket = true + enable_ssm_command_monitoring = true s3_iam_policies = ["EC2S3BucketWriteAndDeleteAccessPolicy"] } } diff --git a/terraform/environments/nomis-data-hub/main.tf b/terraform/environments/nomis-data-hub/main.tf index a23f7e6d41b..d234a1e3a07 100644 --- a/terraform/environments/nomis-data-hub/main.tf +++ b/terraform/environments/nomis-data-hub/main.tf @@ -74,6 +74,7 @@ module "baseline" { ) cloudwatch_metric_alarms = merge( + module.baseline_presets.cloudwatch_metric_alarms_baseline, lookup(local.baseline_all_environments, "cloudwatch_metric_alarms", {}), lookup(local.baseline_environment_specific, "cloudwatch_metric_alarms", {}), ) @@ -177,6 +178,11 @@ module "baseline" { lookup(local.baseline_environment_specific, "s3_buckets", {}), ) + schedule_alarms_lambda = merge( + lookup(local.baseline_all_environments, "schedule_alarms_lambda", {}), + lookup(local.baseline_environment_specific, "schedule_alarms_lambda", {}), + ) + secretsmanager_secrets = merge( module.baseline_presets.secretsmanager_secrets, lookup(local.baseline_all_environments, "secretsmanager_secrets", {}), diff --git a/terraform/environments/nomis/locals.tf b/terraform/environments/nomis/locals.tf index ada318258c9..2f5c27143a0 100644 --- a/terraform/environments/nomis/locals.tf +++ b/terraform/environments/nomis/locals.tf @@ -37,6 +37,7 @@ locals { enable_s3_bucket = true enable_s3_db_backup_bucket = true enable_s3_software_bucket = true + enable_ssm_command_monitoring = true route53_resolver_rules = { outbound-data-and-private-subnets = ["azure-fixngo-domain"] } s3_iam_policies = ["EC2S3BucketWriteAndDeleteAccessPolicy"] software_bucket_name = "ec2-image-builder-nomis" @@ -48,7 +49,6 @@ locals { enable_resource_explorer = true } - cloudwatch_metric_alarms = module.baseline_presets.cloudwatch_metric_alarms.ssm - security_groups = local.security_groups + security_groups = local.security_groups } } diff --git a/terraform/environments/nomis/locals_test.tf b/terraform/environments/nomis/locals_test.tf index e691faaa08a..6b98c78cbef 100644 --- a/terraform/environments/nomis/locals_test.tf +++ b/terraform/environments/nomis/locals_test.tf @@ -7,7 +7,6 @@ locals { baseline_presets_test = { options = { - enable_observability_platform_monitoring = true sns_topics = { pagerduty_integrations = { pagerduty = "nomis-test" @@ -51,10 +50,11 @@ locals { } ec2_autoscaling_groups = { - # NOT-ACTIVE (blue deployment) + # NOT-ACTIVE (blue deployment) - for testing Combined Reporting t1-nomis-web-a = merge(local.ec2_autoscaling_groups.web, { autoscaling_group = merge(local.ec2_autoscaling_groups.web.autoscaling_group, { desired_capacity = 0 + max_size = 1 }) # cloudwatch_metric_alarms = local.cloudwatch_metric_alarms.web config = merge(local.ec2_autoscaling_groups.web.config, { @@ -71,10 +71,11 @@ locals { }) }) tags = merge(local.ec2_autoscaling_groups.web.tags, { - nomis-environment = "t1" - oracle-db-hostname-a = "t1nomis-a.test.nomis.service.justice.gov.uk" - oracle-db-hostname-b = "t1nomis-b.test.nomis.service.justice.gov.uk" - oracle-db-name 
= "T1CNOM" + nomis-environment = "t1" + oracle-db-hostname-a = "t1nomis-a.test.nomis.service.justice.gov.uk" + oracle-db-hostname-b = "t1nomis-b.test.nomis.service.justice.gov.uk" + oracle-db-name = "T1CNOM" + reporting-environment = "aws" }) }) diff --git a/terraform/environments/nomis/main.tf b/terraform/environments/nomis/main.tf index a23f7e6d41b..d234a1e3a07 100644 --- a/terraform/environments/nomis/main.tf +++ b/terraform/environments/nomis/main.tf @@ -74,6 +74,7 @@ module "baseline" { ) cloudwatch_metric_alarms = merge( + module.baseline_presets.cloudwatch_metric_alarms_baseline, lookup(local.baseline_all_environments, "cloudwatch_metric_alarms", {}), lookup(local.baseline_environment_specific, "cloudwatch_metric_alarms", {}), ) @@ -177,6 +178,11 @@ module "baseline" { lookup(local.baseline_environment_specific, "s3_buckets", {}), ) + schedule_alarms_lambda = merge( + lookup(local.baseline_all_environments, "schedule_alarms_lambda", {}), + lookup(local.baseline_environment_specific, "schedule_alarms_lambda", {}), + ) + secretsmanager_secrets = merge( module.baseline_presets.secretsmanager_secrets, lookup(local.baseline_all_environments, "secretsmanager_secrets", {}), diff --git a/terraform/environments/oasys-national-reporting/locals.tf b/terraform/environments/oasys-national-reporting/locals.tf index c8298624dd8..05c0cc63abe 100644 --- a/terraform/environments/oasys-national-reporting/locals.tf +++ b/terraform/environments/oasys-national-reporting/locals.tf @@ -26,6 +26,7 @@ locals { "ec2_linux", "ec2_instance_linux", "ec2_windows", + "ssm_command", ] cloudwatch_metric_alarms_default_actions = ["pagerduty"] cloudwatch_metric_oam_links_ssm_parameters = ["hmpps-oem-${local.environment}"] @@ -41,6 +42,7 @@ locals { enable_image_builder = true enable_s3_bucket = true enable_s3_shared_bucket = true + enable_ssm_command_monitoring = true s3_iam_policies = ["EC2S3BucketWriteAndDeleteAccessPolicy"] } } diff --git a/terraform/environments/oasys-national-reporting/locals_ec2_instances.tf b/terraform/environments/oasys-national-reporting/locals_ec2_instances.tf index 4a925b3718b..cccc7516d80 100644 --- a/terraform/environments/oasys-national-reporting/locals_ec2_instances.tf +++ b/terraform/environments/oasys-national-reporting/locals_ec2_instances.tf @@ -22,9 +22,9 @@ locals { } ebs_volumes = { "/dev/sda1" = { type = "gp3", size = 128 } # root volume - "/dev/xvdk" = { type = "gp3", size = 128 } # D:/ Temp - "/dev/xvdl" = { type = "gp3", size = 128 } # E:/ App - "/dev/xvdm" = { type = "gp3", size = 700 } # F:/ Storage + "xvdd" = { type = "gp3", size = 128 } # D:/ Temp + "xvde" = { type = "gp3", size = 128 } # E:/ App + "xvdf" = { type = "gp3", size = 700 } # F:/ Storage } instance = { disable_api_termination = false diff --git a/terraform/environments/oasys-national-reporting/locals_preproduction.tf b/terraform/environments/oasys-national-reporting/locals_preproduction.tf index a61ee51aa57..66689446382 100644 --- a/terraform/environments/oasys-national-reporting/locals_preproduction.tf +++ b/terraform/environments/oasys-national-reporting/locals_preproduction.tf @@ -87,8 +87,16 @@ locals { "Ec2SecretPolicy", ]) }) + # IMPORTANT: EBS volume initialization, labelling, formatting was carried out manually on this instance. It was not automated so these ebs_volume settings are bespoke. Additional volumes should NOT be /dev/xvd* see the local.ec2_instances.bods.ebs_volumes setting for the correct device names. 
+ ebs_volumes = { + "/dev/sda1" = { type = "gp3", size = 128 } # root volume + "/dev/xvdk" = { type = "gp3", size = 128 } # D:/ Temp + "/dev/xvdl" = { type = "gp3", size = 128 } # E:/ App + "/dev/xvdm" = { type = "gp3", size = 700 } # F:/ Storage + } instance = merge(local.ec2_instances.bods.instance, { instance_type = "r6i.2xlarge" + disable_api_termination = true }) cloudwatch_metric_alarms = null tags = merge(local.ec2_instances.bods.tags, { diff --git a/terraform/environments/oasys-national-reporting/locals_test.tf b/terraform/environments/oasys-national-reporting/locals_test.tf index 096a578a62d..69d87671132 100644 --- a/terraform/environments/oasys-national-reporting/locals_test.tf +++ b/terraform/environments/oasys-national-reporting/locals_test.tf @@ -133,10 +133,10 @@ locals { instance_profile_policies = concat(local.ec2_autoscaling_groups.bods.config.instance_profile_policies, [ "Ec2SecretPolicy", ]) - # user_data_raw = base64encode(templatefile( - # "./templates/user-data-onr-bods-pwsh.yaml.tftpl", { - # branch = "TM/TM-620/test-pagefile-change" - # })) + user_data_raw = base64encode(templatefile( + "./templates/user-data-onr-bods-pwsh.yaml.tftpl", { + branch = "main" + })) }) instance = merge(local.ec2_autoscaling_groups.bods.instance, { instance_type = "m4.xlarge" @@ -151,92 +151,114 @@ locals { ec2_instances = { - # t2-onr-bods-1 = merge(local.ec2_instances.bods, { - # config = merge(local.ec2_instances.bods.config, { + t2-onr-bods-1 = merge(local.ec2_instances.bods, { + config = merge(local.ec2_instances.bods.config, { + availability_zone = "eu-west-2a" + instance_profile_policies = concat(local.ec2_instances.bods.config.instance_profile_policies, [ + "Ec2SecretPolicy", + ]) + }) + instance = merge(local.ec2_instances.bods.instance, { + instance_type = "m4.xlarge" + }) + cloudwatch_metric_alarms = null + tags = merge(local.ec2_instances.bods.tags, { + oasys-national-reporting-environment = "t2" + domain-name = "azure.noms.root" + }) + }) + + t2-onr-bods-2 = merge(local.ec2_instances.bods, { + config = merge(local.ec2_instances.bods.config, { + availability_zone = "eu-west-2b" + instance_profile_policies = concat(local.ec2_instances.bods.config.instance_profile_policies, [ + "Ec2SecretPolicy", + ]) + }) + instance = merge(local.ec2_instances.bods.instance, { + instance_type = "m4.xlarge" + }) + cloudwatch_metric_alarms = null + tags = merge(local.ec2_instances.bods.tags, { + oasys-national-reporting-environment = "t2" + domain-name = "azure.noms.root" + }) + }) + + # NOTE: These are all BOE 3.1 instances and are not currently needed + # t2-onr-boe-1-a = merge(local.ec2_instances.boe_app, { + # config = merge(local.ec2_instances.boe_app.config, { # availability_zone = "eu-west-2a" - # user_data_raw = base64encode(templatefile( - # "./templates/user-data-onr-bods-pwsh.yaml.tftpl", { - # } - # )) - # instance_profile_policies = concat(local.ec2_instances.bods.config.instance_profile_policies, [ + # instance_profile_policies = setunion(local.ec2_instances.boe_app.config.instance_profile_policies, [ # "Ec2SecretPolicy", # ]) # }) - # instance = merge(local.ec2_instances.bods.instance, { + # instance = merge(local.ec2_instances.boe_app.instance, { # instance_type = "m4.xlarge" # }) - # cloudwatch_metric_alarms = null - # tags = merge(local.ec2_instances.bods.tags, { + # tags = merge(local.ec2_instances.boe_app.tags, { # oasys-national-reporting-environment = "t2" - # domain-name = "azure.noms.root" # }) # }) - # Pending sorting out cluster install of Bods in 
modernisation-platform-configuration-management repo - # t2-onr-bods-2 = merge(local.ec2_instances.bods, { - # config = merge(local.ec2_instances.bods.config, { - # availability_zone = "eu-west-2b" - # user_data_raw = base64encode(templatefile( - # "./templates/user-data-onr-bods-pwsh.yaml.tftpl", { - # branch = "main" - # } - # )) - # instance_profile_policies = concat(local.ec2_instances.bods.config.instance_profile_policies, [ + # # NOTE: currently using a Rhel 6 instance for onr-web instances, not Rhel 7 & independent Tomcat install + # t2-onr-web-1-a = merge(local.ec2_instances.boe_web, { + # config = merge(local.ec2_instances.boe_web.config, { + # ami_name = "base_rhel_6_10_*" + # availability_zone = "eu-west-2a" + # instance_profile_policies = setunion(local.ec2_instances.boe_web.config.instance_profile_policies, [ # "Ec2SecretPolicy", # ]) # }) - # instance = merge(local.ec2_instances.bods.instance, { - # instance_type = "m4.xlarge" + # instance = merge(local.ec2_instances.boe_web.instance, { + # instance_type = "m4.large" + # metadata_options_http_tokens = "optional" # required as Rhel 6 cloud-init does not support IMDSv2 # }) - # cloudwatch_metric_alarms = null - # tags = merge(local.ec2_instances.bods.tags, { + # tags = merge(local.ec2_instances.boe_web.tags, { + # ami = "base_rhel_6_10" # oasys-national-reporting-environment = "t2" + # }) + # }) + # t2-onr-client-a = merge(local.ec2_instances.jumpserver, { + # config = merge(local.ec2_instances.jumpserver.config, { + # ami_name = "base_windows_server_2012_r2_release_2024-06-01T00-00-32.450Z" + # availability_zone = "eu-west-2a" + # }) + # tags = merge(local.ec2_instances.jumpserver.tags, { # domain-name = "azure.noms.root" # }) # }) + } - t2-onr-boe-1-a = merge(local.ec2_instances.boe_app, { - config = merge(local.ec2_instances.boe_app.config, { - availability_zone = "eu-west-2a" - instance_profile_policies = setunion(local.ec2_instances.boe_app.config.instance_profile_policies, [ - "Ec2SecretPolicy", - ]) - }) - instance = merge(local.ec2_instances.boe_app.instance, { - instance_type = "m4.xlarge" - }) - tags = merge(local.ec2_instances.boe_app.tags, { - oasys-national-reporting-environment = "t2" - }) - }) + fsx_windows = { + t2-bods-windows-share = { + preferred_availability_zone = "eu-west-2a" + deployment_type = "MULTI_AZ_1" + security_groups = ["bods"] + skip_final_backup = true + storage_capacity = 128 + throughput_capacity = 8 - # NOTE: currently using a Rhel 6 instance for onr-web instances, not Rhel 7 & independent Tomcat install - t2-onr-web-1-a = merge(local.ec2_instances.boe_web, { - config = merge(local.ec2_instances.boe_web.config, { - ami_name = "base_rhel_6_10_*" - availability_zone = "eu-west-2a" - instance_profile_policies = setunion(local.ec2_instances.boe_web.config.instance_profile_policies, [ - "Ec2SecretPolicy", - ]) - }) - instance = merge(local.ec2_instances.boe_web.instance, { - instance_type = "m4.large" - metadata_options_http_tokens = "optional" # required as Rhel 6 cloud-init does not support IMDSv2 - }) - tags = merge(local.ec2_instances.boe_web.tags, { - ami = "base_rhel_6_10" - oasys-national-reporting-environment = "t2" - }) - }) - t2-onr-client-a = merge(local.ec2_instances.jumpserver, { - config = merge(local.ec2_instances.jumpserver.config, { - ami_name = "base_windows_server_2012_r2_release_2024-06-01T00-00-32.450Z" - availability_zone = "eu-west-2a" - }) - tags = merge(local.ec2_instances.jumpserver.tags, { - domain-name = "azure.noms.root" - }) - }) + subnets = [ + { + name = "private" + 
availability_zones = ["eu-west-2a", "eu-west-2b"] + } + ] + + self_managed_active_directory = { + dns_ips = [ + module.ip_addresses.mp_ip.ad-azure-dc-a, + module.ip_addresses.mp_ip.ad-azure-dc-b, + ] + domain_name = "azure.noms.root" + username = "svc_join_domain" + password_secret_name = "/sap/bods/t2/passwords" + } + tags = { + backup = true + } + } } iam_policies = { diff --git a/terraform/environments/oasys-national-reporting/main.tf b/terraform/environments/oasys-national-reporting/main.tf index a23f7e6d41b..d234a1e3a07 100644 --- a/terraform/environments/oasys-national-reporting/main.tf +++ b/terraform/environments/oasys-national-reporting/main.tf @@ -74,6 +74,7 @@ module "baseline" { ) cloudwatch_metric_alarms = merge( + module.baseline_presets.cloudwatch_metric_alarms_baseline, lookup(local.baseline_all_environments, "cloudwatch_metric_alarms", {}), lookup(local.baseline_environment_specific, "cloudwatch_metric_alarms", {}), ) @@ -177,6 +178,11 @@ module "baseline" { lookup(local.baseline_environment_specific, "s3_buckets", {}), ) + schedule_alarms_lambda = merge( + lookup(local.baseline_all_environments, "schedule_alarms_lambda", {}), + lookup(local.baseline_environment_specific, "schedule_alarms_lambda", {}), + ) + secretsmanager_secrets = merge( module.baseline_presets.secretsmanager_secrets, lookup(local.baseline_all_environments, "secretsmanager_secrets", {}), diff --git a/terraform/environments/oasys-national-reporting/templates/user-data-onr-bods-pwsh.yaml.tftpl b/terraform/environments/oasys-national-reporting/templates/user-data-onr-bods-pwsh.yaml.tftpl index 9329a892d73..a8085a34a5e 100644 --- a/terraform/environments/oasys-national-reporting/templates/user-data-onr-bods-pwsh.yaml.tftpl +++ b/terraform/environments/oasys-national-reporting/templates/user-data-onr-bods-pwsh.yaml.tftpl @@ -4,22 +4,26 @@ # See C:\Windows\System32\config\systemprofile\AppData\Local\Temp\EC2Launch* for script output version: 1.0 # version 1.0 is required as this executes AFTER the SSM Agent is running tasks: - - task: initializeVolume + - task: executeScript inputs: - initialize: devices - devices: - - device: xvdk - name: Temp - letter: D - partition: gpt - - device: xvdl - name: App - letter: E - partition: gpt - - device: xvdm - name: Storage - letter: F - partition: gpt + - frequency: once + type: powershell + runAs: admin + content: |- + # Initialize all offline disks + $offlineDisks = Get-Disk | Where-Object IsOffline -Eq $true + foreach ($disk in $offlineDisks) { + Initialize-Disk -Number $disk.Number -PartitionStyle GPT + } + # Create partitions and assign drive letters + $letters = @('D', 'E', 'F') + $labels = @('Temp', 'App', 'Storage') + $disks = Get-Disk | Where-Object PartitionStyle -Eq 'GPT' | Where-Object IsSystem -Eq $false + for ($i = 0; $i -lt $disks.Count; $i++) { + $partition = New-Partition -DiskNumber $disks[$i].Number -UseMaximumSize + Format-Volume -Partition $partition -FileSystem NTFS -NewFileSystemLabel $labels[$i] -Confirm:$false + Set-Partition -InputObject $partition -NewDriveLetter $letters[$i] + } - task: executeScript inputs: - frequency: once diff --git a/terraform/environments/oasys/iam.tf b/terraform/environments/oasys/iam.tf deleted file mode 100644 index 5d7491e0574..00000000000 --- a/terraform/environments/oasys/iam.tf +++ /dev/null @@ -1,45 +0,0 @@ -# Create user for MGN - for mgn agents running on azure vms -#tfsec:ignore:aws-iam-no-user-attached-policies -#tfsec:ignore:AWS273 -resource "aws_iam_user" "mgn_user" { - #checkov:skip=CKV_AWS_273: "Skipping 
as tfsec check is also set to ignore" - name = "MGN-Test" - tags = local.tags -} -#tfsec:ignore:aws-iam-no-user-attached-policies -resource "aws_iam_user_policy_attachment" "mgn_attach_policy_migration" { - #tfsec:ignore:aws-iam-no-user-attached-policies "This is a short lived user, so allowing IAM policies attached directly to a user." - #checkov:skip=CKV_AWS_40: "Skipping as tfsec check is also ignored" - user = aws_iam_user.mgn_user.name - policy_arn = "arn:aws:iam::aws:policy/AWSApplicationMigrationAgentInstallationPolicy" -} - -#tfsec:ignore:aws-iam-no-user-attached-policies -resource "aws_iam_user_policy_attachment" "mgn_attach_policy_discovery" { - #tfsec:ignore:aws-iam-no-user-attached-policies "This is a short lived user, so allowing IAM policies attached directly to a user." - #checkov:skip=CKV_AWS_40: "Skipping as tfsec check is also ignored" - user = aws_iam_user.mgn_user.name - policy_arn = "arn:aws:iam::aws:policy/AWSApplicationDiscoveryAgentAccess" -} - -resource "aws_iam_user_policy_attachment" "mgn_attach_policy_service_access" { - #tfsec:ignore:aws-iam-no-user-attached-policies "This is a short lived user, so allowing IAM policies attached directly to a user." - #checkov:skip=CKV_AWS_40: "Skipping as tfsec check is also ignored" - user = aws_iam_user.mgn_user.name - policy_arn = "arn:aws:iam::aws:policy/AWSApplicationDiscoveryServiceFullAccess" -} - -resource "aws_iam_user_policy_attachment" "mgn_attach_policy_migrationhub_access" { - #tfsec:ignore:aws-iam-no-user-attached-policies "This is a short lived user, so allowing IAM policies attached directly to a user." - #checkov:skip=CKV_AWS_40: "Skipping as tfsec check is also ignored" - user = aws_iam_user.mgn_user.name - policy_arn = "arn:aws:iam::aws:policy/AWSMigrationHubFullAccess" -} - -resource "aws_iam_user_policy_attachment" "mgn_attach_policy_app_migrationfull_access" { - #tfsec:ignore:aws-iam-no-user-attached-policies "This is a short lived user, so allowing IAM policies attached directly to a user." 
- #checkov:skip=CKV_AWS_40: "Skipping as tfsec check is also ignored" - user = aws_iam_user.mgn_user.name - policy_arn = "arn:aws:iam::aws:policy/AWSApplicationMigrationFullAccess" -} - diff --git a/terraform/environments/oasys/locals.tf b/terraform/environments/oasys/locals.tf index 150199c622a..21c2dff9d24 100644 --- a/terraform/environments/oasys/locals.tf +++ b/terraform/environments/oasys/locals.tf @@ -28,6 +28,7 @@ locals { "ec2_instance_linux", "ec2_instance_oracle_db_with_backup", "ec2_instance_textfile_monitoring", + "ssm_command", ] cloudwatch_metric_alarms_default_actions = ["pagerduty"] cloudwatch_metric_oam_links_ssm_parameters = ["hmpps-oem-${local.environment}"] @@ -45,6 +46,7 @@ locals { enable_s3_bucket = true enable_s3_db_backup_bucket = true enable_s3_shared_bucket = true + enable_ssm_command_monitoring = true enable_vmimport = true s3_bucket_name = "${local.application_name}-${local.environment}" s3_iam_policies = ["EC2S3BucketWriteAndDeleteAccessPolicy"] diff --git a/terraform/environments/oasys/locals_cloudwatch_dashboards.tf b/terraform/environments/oasys/locals_cloudwatch_dashboards.tf new file mode 100644 index 00000000000..94b2e979b03 --- /dev/null +++ b/terraform/environments/oasys/locals_cloudwatch_dashboards.tf @@ -0,0 +1,123 @@ +locals { + + cloudwatch_dashboard_widget_groups = { + ec2 = { + header_markdown = "## EC2 WEB and DB" + width = 8 + height = 8 + search_filter = { + negate = true + ec2_tag = [ + { tag_name = "server-type", tag_value = "oasys-web" }, + { tag_name = "server-type", tag_value = "oasys-db" }, + { tag_name = "server-type", tag_value = "oasys-bip" }, + { tag_name = "server-type", tag_value = "onr-db" }, + ] + } + widgets = [ + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.cpu-utilization-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.instance-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.system-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.cpu-iowait-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.high-memory-usage, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_autoscaling_group_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_autoscaling_group_cwagent_collectd_service_status_os.service-status-error-os-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_autoscaling_group_cwagent_collectd_service_status_app.service-status-error-app-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_windows.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_windows.high-memory-usage, + null, + ] + } + + connectivity = { + header_markdown = "## Connectivity" + width = 8 + height = 8 + widgets = [ + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_connectivity_test.connectivity-test-all-failed, + null, + null + ] + } + + db = { + header_markdown = "## EC2 Oracle Database" + width = 8 + height = 8 + add_ebs_widgets = { + iops = true + throughput = true + } + search_filter = { + ec2_tag = [ + { tag_name = "server-type", tag_value = "oasys-db" }, + { tag_name = "server-type", tag_value = "onr-db" }, + ] + } + widgets = [ + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.cpu-utilization-high, + 
module.baseline_presets.cloudwatch_dashboard_widgets.ec2.instance-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.system-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.high-memory-usage, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.cpu-iowait-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_os.service-status-error-os-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_app.service-status-error-app-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_connected.oracle-db-disconnected, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_backup.oracle-db-rman-backup-error, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_backup.oracle-db-rman-backup-did-not-run, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_textfile_monitoring.textfile-monitoring-metric-error, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_textfile_monitoring.textfile-monitoring-metric-not-updated, + null + ] + } + + oasys = { + header_markdown = "## EC2 OASys WEB and DB" + width = 8 + height = 8 + search_filter = { + ec2_tag = [ + { tag_name = "server-type", tag_value = "oasys-db" }, + { tag_name = "server-type", tag_value = "oasys-web" }, + { tag_name = "server-type", tag_value = "oasys-bip" }, + ] + } + widgets = [ + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.cpu-utilization-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.instance-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.system-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.high-memory-usage, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.cpu-iowait-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_os.service-status-error-os-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_app.service-status-error-app-layer, + ] + } + + onr = { + header_markdown = "## EC2 ONR" + width = 8 + height = 8 + search_filter = { + ec2_tag = [ + { tag_name = "server-type", tag_value = "onr-db" }, + ] + } + widgets = [ + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.cpu-utilization-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.instance-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.system-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.high-memory-usage, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.cpu-iowait-high, + 
module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_os.service-status-error-os-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_app.service-status-error-app-layer, + ] + } + } +} diff --git a/terraform/environments/oasys/locals_preproduction.tf b/terraform/environments/oasys/locals_preproduction.tf index 586d8d6896c..b405669b162 100644 --- a/terraform/environments/oasys/locals_preproduction.tf +++ b/terraform/environments/oasys/locals_preproduction.tf @@ -33,6 +33,20 @@ locals { } } + cloudwatch_dashboards = { + "CloudWatch-Default" = { + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + local.cloudwatch_dashboard_widget_groups.db, + local.cloudwatch_dashboard_widget_groups.onr, + local.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ssm_command, + ] + } + } + ec2_autoscaling_groups = { pp-oasys-web-a = merge(local.ec2_autoscaling_groups.web, { autoscaling_schedules = { diff --git a/terraform/environments/oasys/locals_production.tf b/terraform/environments/oasys/locals_production.tf index f833f2239e4..816b2f40c6d 100644 --- a/terraform/environments/oasys/locals_production.tf +++ b/terraform/environments/oasys/locals_production.tf @@ -40,6 +40,109 @@ locals { } } + cloudwatch_dashboards = { + "CloudWatch-Default" = { + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + local.cloudwatch_dashboard_widget_groups.connectivity, + local.cloudwatch_dashboard_widget_groups.db, + local.cloudwatch_dashboard_widget_groups.onr, + local.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ssm_command, + ] + } + "pd-oasys-db-a" = { + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + { + width = 8 + height = 8 + search_filter = { ec2_tag = [{ tag_name = "Name", tag_value = "pd-oasys-db-a" }] } + widgets = [ + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.cpu-utilization-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.instance-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.system-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.high-memory-usage, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.cpu-iowait-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_os.service-status-error-os-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_app.service-status-error-app-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_connected.oracle-db-disconnected, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_backup.oracle-db-rman-backup-error, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_backup.oracle-db-rman-backup-did-not-run, + ] + }, + { + header_markdown = "## OASys BATCH" + 
width = 8 + height = 8 + search_filter = { ec2_tag = [{ tag_name = "Name", tag_value = "pd-oasys-db-a" }] } + widgets = [ + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_textfile_monitoring.textfile-monitoring-metric-error, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_textfile_monitoring.textfile-monitoring-metric-not-updated, + null + ] + }, + { + header_markdown = "## EBS PERFORMANCE" + width = 8 + height = 8 + add_ebs_widgets = { iops = true, throughput = true } + search_filter = { ec2_tag = [{ tag_name = "Name", tag_value = "pd-oasys-db-a" }] } + widgets = [] + } + ] + } + "pd-oasys-db-b" = { + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + { + width = 8 + height = 8 + search_filter = { ec2_tag = [{ tag_name = "Name", tag_value = "pd-oasys-db-b" }] } + widgets = [ + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.cpu-utilization-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.instance-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2.system-status-check-failed, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.high-memory-usage, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_cwagent_linux.cpu-iowait-high, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_linux.free-disk-space-low, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_os.service-status-error-os-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_service_status_app.service-status-error-app-layer, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_connected.oracle-db-disconnected, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_backup.oracle-db-rman-backup-error, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_oracle_db_backup.oracle-db-rman-backup-did-not-run, + ] + }, + { + header_markdown = "## Other" + width = 8 + height = 8 + search_filter = { ec2_tag = [{ tag_name = "Name", tag_value = "pd-oasys-db-b" }] } + widgets = [ + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_textfile_monitoring.textfile-monitoring-metric-error, + module.baseline_presets.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_textfile_monitoring.textfile-monitoring-metric-not-updated, + null + ] + }, + { + header_markdown = "## EBS PERFORMANCE" + width = 8 + height = 8 + add_ebs_widgets = { iops = true, throughput = true } + search_filter = { ec2_tag = [{ tag_name = "Name", tag_value = "pd-oasys-db-b" }] } + widgets = [] + } + ] + } + } + ec2_autoscaling_groups = { pd-oasys-web-a = merge(local.ec2_autoscaling_groups.web, { autoscaling_group = merge(local.ec2_autoscaling_groups.web.autoscaling_group, { diff --git a/terraform/environments/oasys/locals_security_groups.tf b/terraform/environments/oasys/locals_security_groups.tf index e980af0f315..3f274f4c8e1 100644 --- a/terraform/environments/oasys/locals_security_groups.tf +++ b/terraform/environments/oasys/locals_security_groups.tf @@ -12,9 +12,7 @@ locals { https_external = flatten([ module.ip_addresses.azure_fixngo_cidrs.internet_egress, module.ip_addresses.moj_cidrs.trusted_moj_digital_staff_public, - 
module.ip_addresses.moj_cidr.aws_cloud_platform_vpc, # "172.20.0.0/16" module.ip_addresses.external_cidrs.cloud_platform, - module.ip_addresses.azure_studio_hosting_public.devtest, ]) https_external_monitoring = flatten([ module.ip_addresses.mp_cidrs.non_live_eu_west_nat, @@ -37,16 +35,25 @@ locals { ]) ssh = ["10.0.0.0/8"] https_internal = flatten([ - module.ip_addresses.moj_cidr.aws_cloud_platform_vpc, "10.0.0.0/8", + module.ip_addresses.moj_cidr.aws_cloud_platform_vpc, # "172.20.0.0/16" ]) https_external = flatten([ module.ip_addresses.azure_fixngo_cidrs.internet_egress, module.ip_addresses.moj_cidrs.trusted_moj_digital_staff_public, - module.ip_addresses.moj_cidr.aws_cloud_platform_vpc, # "172.20.0.0/16" + module.ip_addresses.moj_cidr.vodafone_dia_networks, + module.ip_addresses.moj_cidr.palo_alto_primsa_access_corporate, module.ip_addresses.external_cidrs.cloud_platform, - module.ip_addresses.azure_studio_hosting_public.prod, - "10.0.0.0/8" + module.ip_addresses.external_cidrs.sodeco, + module.ip_addresses.external_cidrs.interserve, + module.ip_addresses.external_cidrs.meganexus, + module.ip_addresses.external_cidrs.serco, + module.ip_addresses.external_cidrs.rrp, + module.ip_addresses.external_cidrs.eos, + module.ip_addresses.external_cidrs.oasys_sscl, + module.ip_addresses.external_cidrs.dtv, + module.ip_addresses.external_cidrs.nps_wales, + module.ip_addresses.external_cidrs.dxw, ]) https_external_monitoring = flatten([ module.ip_addresses.mp_cidrs.live_eu_west_nat, @@ -77,17 +84,14 @@ locals { "10.0.0.0/8", module.ip_addresses.moj_cidr.aws_cloud_platform_vpc, # "172.20.0.0/16" ]) + # NOTE: this is at the limit for the number of rules in a single SG + # Always test changes in preproduction first https_external = flatten([ module.ip_addresses.azure_fixngo_cidrs.internet_egress, module.ip_addresses.moj_cidrs.trusted_moj_digital_staff_public, - module.ip_addresses.moj_cidr.aws_cloud_platform_vpc, # "172.20.0.0/16" module.ip_addresses.moj_cidr.vodafone_dia_networks, module.ip_addresses.moj_cidr.palo_alto_primsa_access_corporate, module.ip_addresses.external_cidrs.cloud_platform, - module.ip_addresses.azure_studio_hosting_public.prod, - "35.177.125.252/32", "35.177.137.160/32", # trusted_appgw_external_client_ips infra_ip.j5_phones - "20.49.214.199/32", "20.49.214.228/32", "20.26.11.71/32", "20.26.11.108/32", # Azure Landing Zone Egress - "195.59.75.0/24", "194.33.192.0/25", "194.33.193.0/25", "194.33.196.0/25", "194.33.197.0/25", # dom1_eucs_ras module.ip_addresses.external_cidrs.sodeco, module.ip_addresses.external_cidrs.interserve, module.ip_addresses.external_cidrs.meganexus, @@ -226,14 +230,11 @@ locals { self = true } http8080 = { - description = "Allow http8080 ingress" - from_port = 0 - to_port = 8080 - protocol = "tcp" - cidr_blocks = flatten([ - local.security_group_cidrs.https_internal, - local.security_group_cidrs.https_external, - ]) + description = "Allow http8080 ingress" + from_port = 0 + to_port = 8080 + protocol = "tcp" + cidr_blocks = local.security_group_cidrs.https_internal security_groups = ["private_lb", "public_lb"] } } diff --git a/terraform/environments/oasys/locals_test.tf b/terraform/environments/oasys/locals_test.tf index e67c876c39e..03436ea67a4 100644 --- a/terraform/environments/oasys/locals_test.tf +++ b/terraform/environments/oasys/locals_test.tf @@ -3,7 +3,6 @@ locals { baseline_presets_test = { options = { - enable_observability_platform_monitoring = true sns_topics = { pagerduty_integrations = { pagerduty = "oasys-test" @@ -32,6 +31,20 @@ locals { } 
} + cloudwatch_dashboards = { + "CloudWatch-Default" = { + periodOverride = "auto" + start = "-PT6H" + widget_groups = [ + module.baseline_presets.cloudwatch_dashboard_widget_groups.lb, + local.cloudwatch_dashboard_widget_groups.db, + local.cloudwatch_dashboard_widget_groups.onr, + local.cloudwatch_dashboard_widget_groups.ec2, + module.baseline_presets.cloudwatch_dashboard_widget_groups.ssm_command, + ] + } + } + ec2_autoscaling_groups = { t1-oasys-web-a = merge(local.ec2_autoscaling_groups.web, { autoscaling_schedules = { diff --git a/terraform/environments/oasys/main.tf b/terraform/environments/oasys/main.tf index a23f7e6d41b..d234a1e3a07 100644 --- a/terraform/environments/oasys/main.tf +++ b/terraform/environments/oasys/main.tf @@ -74,6 +74,7 @@ module "baseline" { ) cloudwatch_metric_alarms = merge( + module.baseline_presets.cloudwatch_metric_alarms_baseline, lookup(local.baseline_all_environments, "cloudwatch_metric_alarms", {}), lookup(local.baseline_environment_specific, "cloudwatch_metric_alarms", {}), ) @@ -177,6 +178,11 @@ module "baseline" { lookup(local.baseline_environment_specific, "s3_buckets", {}), ) + schedule_alarms_lambda = merge( + lookup(local.baseline_all_environments, "schedule_alarms_lambda", {}), + lookup(local.baseline_environment_specific, "schedule_alarms_lambda", {}), + ) + secretsmanager_secrets = merge( module.baseline_presets.secretsmanager_secrets, lookup(local.baseline_all_environments, "secretsmanager_secrets", {}), diff --git a/terraform/environments/observability-platform/environment-configurations.tf b/terraform/environments/observability-platform/environment-configurations.tf index a50b07efc2e..656dd941e2d 100644 --- a/terraform/environments/observability-platform/environment-configurations.tf +++ b/terraform/environments/observability-platform/environment-configurations.tf @@ -16,10 +16,56 @@ locals { athena_enabled = false } } + } + } + grafana_version = "10.4" + grafana_api_key_rotator_version = "1.0.10" + } + production = { + tenant_configuration = { + "observability-platform" = { + identity_centre_team = "observability-platform" + slack_channels = ["observability-platform-production-alerts"] + aws_accounts = { + "observability-platform-production" = { + cloudwatch_enabled = true + prometheus_push_enabled = false + amazon_prometheus_query_enabled = false + xray_enabled = true + athena_enabled = false + } + } }, "analytical-platform" = { identity_centre_team = "analytical-platform" aws_accounts = { + "analytical-platform-compute-development" = { + cloudwatch_enabled = true + prometheus_push_enabled = false + amazon_prometheus_query_enabled = true + amazon_prometheus_workspace_region = "eu-west-2" + amazon_prometheus_workspace_id = "ws-bfdd5d7a-5571-4686-bfd4-43ab07cf8d54ba" + xray_enabled = true + athena_enabled = false + }, + "analytical-platform-compute-production" = { + cloudwatch_enabled = true + prometheus_push_enabled = false + amazon_prometheus_query_enabled = true + amazon_prometheus_workspace_region = "eu-west-2" + amazon_prometheus_workspace_id = "ws-257796b7-4aa4-4c18-b906-6dd21e95d7b73e" + xray_enabled = true + athena_enabled = false + }, + "analytical-platform-compute-test" = { + cloudwatch_enabled = true + prometheus_push_enabled = false + amazon_prometheus_query_enabled = true + amazon_prometheus_workspace_region = "eu-west-2" + amazon_prometheus_workspace_id = "ws-a9d7f576-58b7-4748-b4c1-b02bbdc54a2922" + xray_enabled = true + athena_enabled = false + }, "analytical-platform-ingestion-development" = { cloudwatch_enabled = true 
prometheus_push_enabled = false @@ -27,54 +73,51 @@ locals { xray_enabled = true athena_enabled = false }, - "analytical-platform-compute-development" = { + "analytical-platform-ingestion-production" = { cloudwatch_enabled = true prometheus_push_enabled = false - amazon_prometheus_query_enabled = true - amazon_prometheus_workspace_id = "ws-bfdd5d7a-5571-4686-bfd4-43ab07cf8d54ba" + amazon_prometheus_query_enabled = false xray_enabled = true athena_enabled = false }, - "analytical-platform-compute-test" = { + "analytical-platform-development" = { cloudwatch_enabled = true prometheus_push_enabled = false - amazon_prometheus_query_enabled = true - amazon_prometheus_workspace_id = "ws-a9d7f576-58b7-4748-b4c1-b02bbdc54a2922" - xray_enabled = true + amazon_prometheus_query_enabled = false + xray_enabled = false athena_enabled = false } - } - }, - "data-engineering" = { - "identity_centre_team" = "data-engineering", - "aws_accounts" = { - "analytical-platform-data-engineering-sandboxa" = { + "analytical-platform-production" = { + cloudwatch_enabled = true + prometheus_push_enabled = false + amazon_prometheus_query_enabled = true + amazon_prometheus_workspace_region = "eu-west-1" + amazon_prometheus_workspace_id = "ws-a7b353be-244a-47e7-8054-436b41c050d932" + xray_enabled = false + athena_enabled = false + }, + "analytical-platform-data-development" = { cloudwatch_enabled = true prometheus_push_enabled = false amazon_prometheus_query_enabled = false xray_enabled = false athena_enabled = false - } - } - }, - "digital-prison-reporting" = { - "identity_centre_team" = "hmpps-digital-prison-reporting", - "aws_accounts" = { - "digital-prison-reporting-development" = { + }, + "analytical-platform-data-production" = { cloudwatch_enabled = true prometheus_push_enabled = false amazon_prometheus_query_enabled = false xray_enabled = false athena_enabled = false }, - "digital-prison-reporting-preproduction" = { + "analytical-platform-landing-production" = { cloudwatch_enabled = true prometheus_push_enabled = false amazon_prometheus_query_enabled = false xray_enabled = false athena_enabled = false }, - "digital-prison-reporting-test" = { + "analytical-platform-management-production" = { cloudwatch_enabled = true prometheus_push_enabled = false amazon_prometheus_query_enabled = false @@ -83,17 +126,17 @@ locals { } } }, - "digital-studio-operations" = { - "identity_centre_team" = "studio-webops" + "data-engineering" = { + "identity_centre_team" = "data-engineering", "aws_accounts" = { - "nomis-test" = { + "analytical-platform-data-engineering-sandboxa" = { cloudwatch_enabled = true prometheus_push_enabled = false amazon_prometheus_query_enabled = false xray_enabled = false athena_enabled = false - } - "oasys-test" = { + }, + "analytical-platform-data-engineering-production" = { cloudwatch_enabled = true prometheus_push_enabled = false amazon_prometheus_query_enabled = false @@ -101,59 +144,37 @@ locals { athena_enabled = false } } - } - } - grafana_version = "10.4" - grafana_api_key_rotator_version = "1.0.10" - } - production = { - tenant_configuration = { - "observability-platform" = { - identity_centre_team = "observability-platform" - slack_channels = ["observability-platform-production-alerts"] - aws_accounts = { - "observability-platform-production" = { - cloudwatch_enabled = true - prometheus_push_enabled = false - amazon_prometheus_query_enabled = false - xray_enabled = true - athena_enabled = false - } - } }, - "analytical-platform" = { - identity_centre_team = "analytical-platform" - aws_accounts = { - 
"analytical-platform-ingestion-production" = { + "digital-prison-reporting" = { + "identity_centre_team" = "hmpps-digital-prison-reporting", + "aws_accounts" = { + "digital-prison-reporting-development" = { cloudwatch_enabled = true + cloudwatch_custom_namespaces = "DPRAgentCustomMetrics,DPRDataReconciliationCustom" prometheus_push_enabled = false amazon_prometheus_query_enabled = false - xray_enabled = true + xray_enabled = false athena_enabled = false }, - "analytical-platform-compute-production" = { + "digital-prison-reporting-preproduction" = { cloudwatch_enabled = true + cloudwatch_custom_namespaces = "DPRAgentCustomMetrics,DPRDataReconciliationCustom" prometheus_push_enabled = false - amazon_prometheus_query_enabled = true - amazon_prometheus_workspace_id = "ws-257796b7-4aa4-4c18-b906-6dd21e95d7b73e" - xray_enabled = true + amazon_prometheus_query_enabled = false + xray_enabled = false athena_enabled = false }, - "analytical-platform-production" = { + "digital-prison-reporting-production" = { cloudwatch_enabled = true + cloudwatch_custom_namespaces = "DPRAgentCustomMetrics,DPRDataReconciliationCustom" prometheus_push_enabled = false - amazon_prometheus_query_enabled = true - amazon_prometheus_workspace_id = "ws-a7b353be-244a-47e7-8054-436b41c050d932" + amazon_prometheus_query_enabled = false xray_enabled = false athena_enabled = false - } - } - }, - "digital-prison-reporting" = { - "identity_centre_team" = "hmpps-digital-prison-reporting", - "aws_accounts" = { - "digital-prison-reporting-production" = { + }, + "digital-prison-reporting-test" = { cloudwatch_enabled = true + cloudwatch_custom_namespaces = "DPRAgentCustomMetrics,DPRDataReconciliationCustom" prometheus_push_enabled = false amazon_prometheus_query_enabled = false xray_enabled = false diff --git a/terraform/environments/observability-platform/modules/grafana/amazon-prometheus-query-source/main.tf b/terraform/environments/observability-platform/modules/grafana/amazon-prometheus-query-source/main.tf index 263dcc1c6d6..e15652c448d 100644 --- a/terraform/environments/observability-platform/modules/grafana/amazon-prometheus-query-source/main.tf +++ b/terraform/environments/observability-platform/modules/grafana/amazon-prometheus-query-source/main.tf @@ -1,6 +1,6 @@ locals { name = "${var.name}-amp" - url = "https://aps-workspaces.eu-west-2.amazonaws.com/workspaces/${var.amazon_prometheus_workspace_id}/" + url = "https://aps-workspaces.${var.amazon_prometheus_workspace_region}.amazonaws.com/workspaces/${var.amazon_prometheus_workspace_id}/" } resource "grafana_data_source" "this" { @@ -13,7 +13,7 @@ resource "grafana_data_source" "this" { sigV4Auth = true sigV4AuthType = "ec2_iam_role" sigV4AssumeRoleArn = "arn:aws:iam::${var.account_id}:role/observability-platform" - sigV4Region = "eu-west-2" + sigV4Region = var.amazon_prometheus_workspace_region sigV4ExternalId = var.name }) } diff --git a/terraform/environments/observability-platform/modules/grafana/amazon-prometheus-query-source/variables.tf b/terraform/environments/observability-platform/modules/grafana/amazon-prometheus-query-source/variables.tf index e5e53da1d33..402955892d5 100644 --- a/terraform/environments/observability-platform/modules/grafana/amazon-prometheus-query-source/variables.tf +++ b/terraform/environments/observability-platform/modules/grafana/amazon-prometheus-query-source/variables.tf @@ -6,6 +6,10 @@ variable "account_id" { type = string } +variable "amazon_prometheus_workspace_region" { + type = string +} + variable "amazon_prometheus_workspace_id" { 
type = string } diff --git a/terraform/environments/observability-platform/modules/observability-platform/tenant-configuration/main.tf b/terraform/environments/observability-platform/modules/observability-platform/tenant-configuration/main.tf index b09155ebf1b..285c5fb6202 100644 --- a/terraform/environments/observability-platform/modules/observability-platform/tenant-configuration/main.tf +++ b/terraform/environments/observability-platform/modules/observability-platform/tenant-configuration/main.tf @@ -31,9 +31,10 @@ module "amazon_prometheus_query_source" { source = "../../grafana/amazon-prometheus-query-source" - name = each.key - account_id = var.environment_management.account_ids[each.key] - amazon_prometheus_workspace_id = each.value.amazon_prometheus_workspace_id + name = each.key + account_id = var.environment_management.account_ids[each.key] + amazon_prometheus_workspace_region = try(each.value.amazon_prometheus_workspace_region, "eu-west-2") + amazon_prometheus_workspace_id = each.value.amazon_prometheus_workspace_id } locals { diff --git a/terraform/environments/observability-platform/modules/observability-platform/tenant-configuration/variables.tf b/terraform/environments/observability-platform/modules/observability-platform/tenant-configuration/variables.tf index 0fe0ba87859..7db584f4029 100644 --- a/terraform/environments/observability-platform/modules/observability-platform/tenant-configuration/variables.tf +++ b/terraform/environments/observability-platform/modules/observability-platform/tenant-configuration/variables.tf @@ -12,13 +12,14 @@ variable "identity_centre_team" { variable "aws_accounts" { type = map(object({ - cloudwatch_enabled = optional(bool) - cloudwatch_custom_namespaces = optional(string) - prometheus_push_enabled = optional(bool) - amazon_prometheus_query_enabled = optional(bool) - amazon_prometheus_workspace_id = optional(string) - xray_enabled = optional(bool) - athena_enabled = optional(bool) + cloudwatch_enabled = optional(bool) + cloudwatch_custom_namespaces = optional(string) + prometheus_push_enabled = optional(bool) + amazon_prometheus_query_enabled = optional(bool) + amazon_prometheus_workspace_region = optional(string) + amazon_prometheus_workspace_id = optional(string) + xray_enabled = optional(bool) + athena_enabled = optional(bool) athena_config = optional(map(object({ database = string workgroup = string diff --git a/terraform/environments/panda-cyber-appsec-lab/ec2.tf b/terraform/environments/panda-cyber-appsec-lab/ec2.tf index e0ef2484906..ae1fe923a37 100644 --- a/terraform/environments/panda-cyber-appsec-lab/ec2.tf +++ b/terraform/environments/panda-cyber-appsec-lab/ec2.tf @@ -1,12 +1,13 @@ # Kali Linux Instance resource "aws_instance" "kali_linux" { ami = "ami-0f398bcc12f72f967" // aws-marketplace/kali-last-snapshot-amd64-2024.2.0-804fcc46-63fc-4eb6-85a1-50e66d6c7215 - associate_public_ip_address = false + associate_public_ip_address = true instance_type = "t2.micro" subnet_id = module.vpc.private_subnets.0 vpc_security_group_ids = [aws_security_group.kali_linux_sg.id] iam_instance_profile = aws_iam_instance_profile.ssm_instance_profile.name ebs_optimized = true + metadata_options { http_tokens = "required" } @@ -21,27 +22,39 @@ resource "aws_instance" "kali_linux" { } user_data = <<-EOF #!/bin/bash - # Update and install dependencies - apt-get update - apt-get upgrade - apt-get install -y wget + set -e + exec > >(tee /var/log/user-data.log | logger -t user-data) 2>&1 + + # Update system packages + echo "Updating and upgrading system 
packages..." + apt-get update -y + apt-get upgrade -y + + # Install necessary tools and Kali default tools + echo "Installing wget, git, and kali-linux-default tools..." + apt-get install -y wget git kali-linux-default + + # Check if 'kali' user exists + if id "kali" &>/dev/null; then + echo "User 'kali' exists. Proceeding to create tooling directory..." + + # Create tooling directory and set ownership + mkdir -p /home/kali/tooling + chown -R kali:kali /home/kali + echo "Tooling directory created under /home/kali and ownership set." + + # Clone the repository as 'kali' user + echo "Cloning gotestwaf repository into /home/kali/tooling..." + sudo -u kali git clone https://github.com/wallarm/gotestwaf.git /home/kali/tooling + echo "Repository cloned successfully." + else + echo "User 'kali' does not exist. Exiting." + exit 1 + fi + + echo "User data script completed successfully." - # Download the SSM agent - wget https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/debian_amd64/amazon-ssm-agent.deb - - # Install the agent - dpkg -i amazon-ssm-agent.deb - - # Start the SSM service - systemctl enable amazon-ssm-agent - systemctl start amazon-ssm-agent - - # Check the status - systemctl status amazon-ssm-agent - - # Install kali-linux-default tools - apt-get install -y kali-linux-default EOF tags = { @@ -53,12 +66,13 @@ resource "aws_instance" "kali_linux" { # Defect Dojo Instance resource "aws_instance" "defect_dojo" { ami = "ami-0e8d228ad90af673b" - associate_public_ip_address = false + associate_public_ip_address = true instance_type = "t2.micro" subnet_id = module.vpc.private_subnets.0 vpc_security_group_ids = [aws_security_group.kali_linux_sg.id] iam_instance_profile = aws_iam_instance_profile.ssm_instance_profile.name ebs_optimized = true + metadata_options { http_tokens = "required" } diff --git a/terraform/environments/planetfm/locals.tf b/terraform/environments/planetfm/locals.tf index f0e16d6eda3..3fc3d4c065e 100644 --- a/terraform/environments/planetfm/locals.tf +++ b/terraform/environments/planetfm/locals.tf @@ -24,6 +24,7 @@ locals { "network_lb", "ec2", "ec2_windows", + "ssm_command", ] cloudwatch_metric_alarms_default_actions = ["pagerduty"] cloudwatch_metric_oam_links_ssm_parameters = ["hmpps-oem-${local.environment}"] @@ -39,6 +40,7 @@ locals { enable_image_builder = true enable_s3_bucket = true enable_s3_software_bucket = true + enable_ssm_command_monitoring = true s3_iam_policies = ["EC2S3BucketWriteAndDeleteAccessPolicy"] } } diff --git a/terraform/environments/planetfm/main.tf b/terraform/environments/planetfm/main.tf index a23f7e6d41b..d234a1e3a07 100644 --- a/terraform/environments/planetfm/main.tf +++ b/terraform/environments/planetfm/main.tf @@ -74,6 +74,7 @@ module "baseline" { ) cloudwatch_metric_alarms = merge( + module.baseline_presets.cloudwatch_metric_alarms_baseline, lookup(local.baseline_all_environments, "cloudwatch_metric_alarms", {}), lookup(local.baseline_environment_specific, "cloudwatch_metric_alarms", {}), ) @@ -177,6 +178,11 @@ module "baseline" { lookup(local.baseline_environment_specific, "s3_buckets", {}), ) + schedule_alarms_lambda = merge( + lookup(local.baseline_all_environments, "schedule_alarms_lambda", {}), + lookup(local.baseline_environment_specific, "schedule_alarms_lambda", {}), + ) + secretsmanager_secrets = merge( module.baseline_presets.secretsmanager_secrets, lookup(local.baseline_all_environments, "secretsmanager_secrets", {}), diff --git a/terraform/environments/ppud/alb_external.tf 
b/terraform/environments/ppud/alb_external.tf index e87d21f0175..a672ed7cdb4 100644 --- a/terraform/environments/ppud/alb_external.tf +++ b/terraform/environments/ppud/alb_external.tf @@ -117,7 +117,7 @@ resource "aws_lb_listener" "WAM-Front-End-DEV" { default_action { type = "forward" - target_group_arn = aws_lb_target_group.WAM-Target-Group.arn + target_group_arn = aws_lb_target_group.WAM-Target-Group-Dev[0].arn } } @@ -131,7 +131,7 @@ resource "aws_lb_listener" "WAM-Front-End-Preprod" { default_action { type = "forward" - target_group_arn = aws_lb_target_group.WAM-Target-Group.arn + target_group_arn = aws_lb_target_group.WAM-Target-Group-Preprod[0].arn } } @@ -145,10 +145,11 @@ resource "aws_lb_listener" "WAM-Front-End-Prod" { default_action { type = "forward" - target_group_arn = aws_lb_target_group.WAM-Target-Group.arn + target_group_arn = aws_lb_target_group.WAM-Target-Group-Prod[0].arn } } +/* resource "aws_lb_target_group" "WAM-Target-Group" { name = "WAM" port = 80 @@ -170,18 +171,18 @@ resource "aws_lb_target_group" "WAM-Target-Group" { Name = "${var.networking[0].business-unit}-${local.environment}" } } - +*/ resource "aws_lb_target_group_attachment" "WAM-Portal-development" { count = local.is-development == true ? 1 : 0 - target_group_arn = aws_lb_target_group.WAM-Target-Group.arn + target_group_arn = aws_lb_target_group.WAM-Target-Group-Dev[0].arn target_id = aws_instance.s609693lo6vw105[0].id - port = 80 + port = 443 } resource "aws_lb_target_group_attachment" "WAM-Portal-preproduction" { count = local.is-preproduction == true ? 1 : 0 - target_group_arn = aws_lb_target_group.WAM-Target-Group.arn + target_group_arn = aws_lb_target_group.WAM-Target-Group-Preprod[0].arn target_id = aws_instance.s618358rgvw201[0].id port = 80 } @@ -189,7 +190,99 @@ resource "aws_lb_target_group_attachment" "WAM-Portal-preproduction" { resource "aws_lb_target_group_attachment" "WAM-Portal-production" { count = local.is-production == true ? 1 : 0 - target_group_arn = aws_lb_target_group.WAM-Target-Group.arn + target_group_arn = aws_lb_target_group.WAM-Target-Group-Prod[0].arn target_id = aws_instance.s618358rgvw204[0].id port = 80 } + +resource "aws_lb_target_group" "WAM-Target-Group-Dev" { + count = local.is-development == true ? 1 : 0 + name = "WAM-Dev" + port = 443 + protocol = "HTTPS" + vpc_id = data.aws_vpc.shared.id + + health_check { + enabled = true + path = "/" + interval = 30 + protocol = "HTTPS" + port = 443 + timeout = 5 + healthy_threshold = 5 + unhealthy_threshold = 2 + matcher = "302" + } + tags = { + Name = "${var.networking[0].business-unit}-${local.environment}" + } +} + +resource "aws_lb_target_group" "WAM-Target-Group-Preprod" { + count = local.is-preproduction == true ? 1 : 0 + name = "WAM-Preprod" + port = 80 + protocol = "HTTP" + vpc_id = data.aws_vpc.shared.id + + health_check { + enabled = true + path = "/" + interval = 30 + protocol = "HTTP" + port = 80 + timeout = 5 + healthy_threshold = 5 + unhealthy_threshold = 2 + matcher = "302" + } + tags = { + Name = "${var.networking[0].business-unit}-${local.environment}" + } +} + +resource "aws_lb_target_group" "WAM-Target-Group-Preprod-2" { + count = local.is-preproduction == true ? 
1 : 0 + name = "WAM-Preprod-2" + port = 443 + protocol = "HTTPS" + vpc_id = data.aws_vpc.shared.id + + health_check { + enabled = true + path = "/" + interval = 30 + protocol = "HTTPS" + port = 443 + timeout = 5 + healthy_threshold = 5 + unhealthy_threshold = 2 + matcher = "302" + } + tags = { + Name = "${var.networking[0].business-unit}-${local.environment}" + } +} + +resource "aws_lb_target_group" "WAM-Target-Group-Prod" { + count = local.is-production == true ? 1 : 0 + name = "WAM-Prod" + port = 80 + protocol = "HTTP" + vpc_id = data.aws_vpc.shared.id + + health_check { + enabled = true + path = "/" + interval = 30 + protocol = "HTTP" + port = 80 + timeout = 5 + healthy_threshold = 5 + unhealthy_threshold = 2 + matcher = "302" + } + tags = { + Name = "${var.networking[0].business-unit}-${local.environment}" + } +} diff --git a/terraform/environments/ppud/application_variables.json b/terraform/environments/ppud/application_variables.json index 49e72047be2..241d4f19403 100644 --- a/terraform/environments/ppud/application_variables.json +++ b/terraform/environments/ppud/application_variables.json @@ -43,14 +43,14 @@ "PPUD_Target": "PPUD", "patch_group": "prod_win_patch", "patch_maintenance_window_name": "prod_patch_maintenance_window", - "patch_maintenance_schedule_cron": "cron(0 20 ? * 3#4 *)", + "patch_maintenance_schedule_cron": "cron(0 20 ? * 4#3 *)", "patch_maintenance_window_duration": 4, "maintenance_window_target_name": "prod_patch_maintenance_window_target", "maintenance_window_target_description": "This is the production patch maintenance window target", "maintenance_window_task_name": "Prod-Instance-Patch", "lin_patch_group": "prod_lin_patch", "patch_lin_maintenance_window_name": "prod_lin_patch_maintenance_window", - "patch_lin_maintenance_schedule_cron": "cron(0 20 ? * 4#4 *)", + "patch_lin_maintenance_schedule_cron": "cron(0 20 ? * 5#3 *)", "patch_lin_maintenance_window_duration": 4, "maintenance_lin_window_target_name": "prod_lin_patch_maintenance_window_target", "maintenance_lin_window_target_description": "This is the production linux patch maintenance window target", diff --git a/terraform/environments/ppud/cloudwatch_alarms_windows.tf b/terraform/environments/ppud/cloudwatch_alarms_windows.tf index 91df665a47d..0f428cdb999 100644 --- a/terraform/environments/ppud/cloudwatch_alarms_windows.tf +++ b/terraform/environments/ppud/cloudwatch_alarms_windows.tf @@ -277,9 +277,9 @@ resource "aws_cloudwatch_metric_alarm" "low_disk_space_H_volume_rgvw027" { namespace = "CWAgent" period = "60" statistic = "Average" - threshold = "5" + threshold = "3" treat_missing_data = "notBreaching" - alarm_description = "This metric monitors the amount of free disk space on the instance. If the amount of free disk space falls below 5% for 5 minutes, the alarm will trigger" + alarm_description = "This metric monitors the amount of free disk space on the instance. 
If the amount of free disk space falls below 3% for 5 minutes, the alarm will trigger" alarm_actions = [aws_sns_topic.cw_alerts[0].arn] dimensions = { InstanceId = "i-00cbccc46d25e77c6" diff --git a/terraform/environments/ppud/eventbridge.tf b/terraform/environments/ppud/eventbridge.tf new file mode 100644 index 00000000000..363b1d697ba --- /dev/null +++ b/terraform/environments/ppud/eventbridge.tf @@ -0,0 +1,156 @@ +################################################ +# Eventbridge Rules (to invoke Lambda functions) +################################################ + +# Eventbridge rule to invoke the Send CPU Graph lambda function every weekday at 17:05 + +resource "aws_lambda_permission" "allow_eventbridge_invoke_send_cpu_graph_prod" { + count = local.is-production == true ? 1 : 0 + statement_id = "AllowEventBridgeInvoke" + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.terraform_lambda_func_send_cpu_graph_prod[0].function_name + principal = "events.amazonaws.com" + source_arn = aws_cloudwatch_event_rule.daily_schedule_send_cpu_graph_prod[0].arn +} + +resource "aws_cloudwatch_event_rule" "daily_schedule_send_cpu_graph_prod" { + count = local.is-production == true ? 1 : 0 + name = "send-cpu-graph-daily-weekday-schedule" + description = "Trigger Lambda at 17:00 UTC on weekdays" + schedule_expression = "cron(5 17 ? * MON-FRI *)" +} + +resource "aws_cloudwatch_event_target" "trigger_lambda_target_send_cpu_graph_prod" { + count = local.is-production == true ? 1 : 0 + rule = aws_cloudwatch_event_rule.daily_schedule_send_cpu_graph_prod[0].name + target_id = "send_cpu_graph" + arn = aws_lambda_function.terraform_lambda_func_send_cpu_graph_prod[0].arn +} + +# Eventbridge rule to invoke the PPUD Email Report lambda function every Monday at 07:00 + +resource "aws_lambda_permission" "allow_eventbridge_invoke_ppud_email_report_prod" { + count = local.is-production == true ? 1 : 0 + statement_id = "AllowEventBridgeInvoke" + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.terraform_lambda_func_ppud_email_report_prod[0].function_name + principal = "events.amazonaws.com" + source_arn = aws_cloudwatch_event_rule.weekly_schedule_ppud_email_report_prod[0].arn +} + +resource "aws_cloudwatch_event_rule" "weekly_schedule_ppud_email_report_prod" { + count = local.is-production == true ? 1 : 0 + name = "ppud-email-report-weekly-schedule" + description = "Trigger Lambda at 07:00 UTC each Monday" + schedule_expression = "cron(0 7 ? * MON *)" +} + +resource "aws_cloudwatch_event_target" "trigger_lambda_target_ppud_email_report_prod" { + count = local.is-production == true ? 1 : 0 + rule = aws_cloudwatch_event_rule.weekly_schedule_ppud_email_report_prod[0].name + target_id = "ppud_email_report" + arn = aws_lambda_function.terraform_lambda_func_ppud_email_report_prod[0].arn +} + +# Eventbridge Rule to Disable CPU Alarms each Friday at 23:00 + +resource "aws_cloudwatch_event_rule" "disable_cpu_alarm" { + count = local.is-production == true ? 1 : 0 + name = "disable_cpu_alarm" + description = "Runs Weekly every Saturday at 00:00 am" + schedule_expression = "cron(0 23 ? * FRI *)" # Time Zone is in UTC + # schedule_expression = "cron(0 0 ? * SAT *)" # Time Zone is in UTC +} + +resource "aws_cloudwatch_event_target" "trigger_lambda_disable_cpu_alarm" { + count = local.is-production == true ? 
1 : 0 + rule = aws_cloudwatch_event_rule.disable_cpu_alarm[0].name + target_id = "disable_cpu_alarm" + arn = aws_lambda_function.terraform_lambda_disable_cpu_alarm[0].arn +} + +resource "aws_lambda_permission" "allow_cloudwatch_to_disable_cpu_alarm" { + count = local.is-production == true ? 1 : 0 + statement_id = "AllowExecutionFromCloudWatch" + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.terraform_lambda_disable_cpu_alarm[0].function_name + principal = "events.amazonaws.com" + source_arn = aws_cloudwatch_event_rule.disable_cpu_alarm[0].arn +} + +# Eventbridge Rule to Enable CPU Alarms each Monday at 05:00 + +resource "aws_cloudwatch_event_rule" "enable_cpu_alarm" { + count = local.is-production == true ? 1 : 0 + name = "enable_cpu_alarm" + description = "Runs Weekly every Monday at 05:00 am" + schedule_expression = "cron(0 5 ? * MON *)" # Time Zone is in UTC + # schedule_expression = "cron(0 0 ? * MON *)" # Time Zone is in UTC +} + +resource "aws_cloudwatch_event_target" "trigger_lambda_enable_cpu_alarm" { + count = local.is-production == true ? 1 : 0 + rule = aws_cloudwatch_event_rule.enable_cpu_alarm[0].name + target_id = "enable_cpu_alarm" + arn = aws_lambda_function.terraform_lambda_enable_cpu_alarm[0].arn +} + +resource "aws_lambda_permission" "allow_cloudwatch_to_enable_cpu_alarm" { + count = local.is-production == true ? 1 : 0 + statement_id = "AllowExecutionFromCloudWatch" + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.terraform_lambda_enable_cpu_alarm[0].function_name + principal = "events.amazonaws.com" + source_arn = aws_cloudwatch_event_rule.enable_cpu_alarm[0].arn +} + + +# EventBridge Rule to stop EC2 instances + +resource "aws_cloudwatch_event_rule" "stop_instance" { + count = local.is-production == true ? 1 : 0 + name = "stop-instance" + description = "Runs Monthly on 2nd Wednesday at 00:00am GMT" + schedule_expression = "cron(0 01 ? * 4#2 *)" # Time Zone is in UTC +} + +resource "aws_cloudwatch_event_target" "trigger_lambda_monthly_once_stop" { + count = local.is-production == true ? 1 : 0 + rule = aws_cloudwatch_event_rule.stop_instance[0].name + target_id = "stop-instance" + arn = aws_lambda_function.terraform_lambda_func_stop[0].arn +} + +resource "aws_lambda_permission" "allow_cloudwatch_to_call_lambda_stop" { + count = local.is-production == true ? 1 : 0 + statement_id = "AllowExecutionFromCloudWatch" + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.terraform_lambda_func_stop[0].function_name + principal = "events.amazonaws.com" + source_arn = aws_cloudwatch_event_rule.stop_instance[0].arn +} + +# EventBridge Rule to start EC2 instances + +resource "aws_cloudwatch_event_rule" "start_instance" { + count = local.is-production == true ? 1 : 0 + name = "start-instance" + description = "Runs Monthly on 2nd Tuesday at 19:00 GMT" + schedule_expression = "cron(0 18 ? * 3#2 *)" # Time Zone in UTC +} + +resource "aws_cloudwatch_event_target" "trigger_lambda_monthly_once_start" { + count = local.is-production == true ? 1 : 0 + rule = aws_cloudwatch_event_rule.start_instance[0].name + target_id = "start-instance" + arn = aws_lambda_function.terraform_lambda_func_start[0].arn +} + +resource "aws_lambda_permission" "allow_cloudwatch_to_call_lambda_start" { + count = local.is-production == true ? 
1 : 0 + statement_id = "AllowExecutionFromCloudWatch" + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.terraform_lambda_func_start[0].function_name + principal = "events.amazonaws.com" + source_arn = aws_cloudwatch_event_rule.start_instance[0].arn +} \ No newline at end of file diff --git a/terraform/environments/ppud/iam.tf b/terraform/environments/ppud/iam.tf index c2c40933966..8f1aabea66f 100644 --- a/terraform/environments/ppud/iam.tf +++ b/terraform/environments/ppud/iam.tf @@ -859,7 +859,7 @@ resource "aws_iam_role_policy_attachment" "attach_lambda_policy_certificate_expi ## Production -/* + data "aws_iam_policy_document" "sns_topic_policy_ec2cw" { count = local.is-production == true ? 1 : 0 policy_id = "SnsTopicId" @@ -896,7 +896,6 @@ data "aws_iam_policy_document" "sns_topic_policy_ec2cw" { ] } } -*/ #################################################### # IAM User, Policy for MGN @@ -1138,4 +1137,247 @@ resource "aws_iam_role_policy_attachment" "attach_aws_signer_policy_to_aws_signe count = local.is-development == true ? 1 : 0 role = aws_iam_role.aws_signer_role_dev[0].name policy_arn = aws_iam_policy.aws_signer_policy_dev[0].arn +} + +############################################# +# IAM Role & Policy for Send CPU graph - DEV +############################################# + +resource "aws_iam_role" "lambda_role_cloudwatch_get_metric_data_dev" { + count = local.is-development == true ? 1 : 0 + name = "PPUD_Lambda_Function_Role_Cloudwatch_Get_Metric_Data_Dev" + assume_role_policy = < + +

+    <html>
+    <body>
+    <p>Hi Team,</p>
+    <p>Please find below the weekly PPUD Email Report.</p>
+    <img src="data:image/png;base64,{graph_base64}" alt="PPUD Email Report">
+    <p>This is an automated email.</p>
+    </body>
+    </html>

+ + + """ + + # Create the email message + msg = MIMEMultipart("alternative") + msg["From"] = SENDER + msg["To"] = ", ".join(RECIPIENTS) + msg["Subject"] = SUBJECT + + # Attach the HTML body + msg.attach(MIMEText(email_body, "html")) + + # Send the email + try: + response = ses_client.send_raw_email( + Source=SENDER, + Destinations=RECIPIENTS, + RawMessage={"Data": msg.as_string()}, + ) + print("Email sent! Message ID:", response["MessageId"]) + except Exception as e: + print("Error sending email:", e) + raise + +def lambda_handler(event, context): + pattern = r'to=<' + data = {} + + for file_name in file_names: + content = retrieve_file_from_s3(bucket_name, file_name) + count = count_occurrences(content, pattern) + data[file_name] = count + + graph_base64 = create_graph(data) + + # Send email with the graph embedded + print("Sending email...") + #email_image_to_users(graph_image.getvalue()) + send_email_with_graph(graph_base64) + + return { + 'statusCode': 200, + 'body': 'Email sent successfully!' + } diff --git a/terraform/environments/ppud/lambda_scripts/send_cpu_graph_dev.py b/terraform/environments/ppud/lambda_scripts/send_cpu_graph_dev.py new file mode 100644 index 00000000000..c707aa7f453 --- /dev/null +++ b/terraform/environments/ppud/lambda_scripts/send_cpu_graph_dev.py @@ -0,0 +1,103 @@ +import boto3 +import datetime +import os +os.environ['MPLCONFIGDIR'] = "/tmp/graph" +import matplotlib.pyplot as plt +import io +import base64 +from botocore.exceptions import ClientError + +# Initialize clients +cloudwatch = boto3.client('cloudwatch') +ses = boto3.client('ses') + +def lambda_handler(event, context): + # 1. Define parameters + instance_id = event.get('InstanceId', 'i-0c98db0c20242e12c') # Replace with your instance ID + email_recipient = event.get('RecipientEmail', 'nick.buckingham@colt.net') # Replace with recipient email + email_sender = 'noreply@internaltest.ppud.justice.gov.uk' # Replace with a verified SES sender email + region = 'eu-west-2' # Replace with your AWS region + + # 2. Fetch CPU Utilization data from CloudWatch + try: + response = cloudwatch.get_metric_data( + MetricDataQueries=[ + { + 'Id': 'cpuUtilization', + 'MetricStat': { + 'Metric': { + 'Namespace': 'AWS/EC2', + 'MetricName': 'CPUUtilization', + 'Dimensions': [{'Name': 'InstanceId', 'Value': instance_id}] + }, + 'Period': 300, # 5 minutes + 'Stat': 'Average' + }, + 'ReturnData': True + } + ], + StartTime=datetime.datetime.utcnow() - datetime.timedelta(hours=168), + EndTime=datetime.datetime.utcnow() + ) + except ClientError as e: + print(f"Error fetching CloudWatch data: {e}") + return {'statusCode': 500, 'body': str(e)} + + # 3. Process data for graphing + timestamps = [] + values = [] + for data_point in response['MetricDataResults'][0]['Timestamps']: + timestamps.append(data_point) + for data_point in response['MetricDataResults'][0]['Values']: + values.append(data_point) + + # Sort data by timestamps + sorted_data = sorted(zip(timestamps, values)) + timestamps, values = zip(*sorted_data) + + # 4. 
Generate a graph + plt.figure(figsize=(10, 6)) + plt.plot(timestamps, values, label="CPU Utilization", marker='o') + plt.xlabel('Time') + plt.ylabel('CPU Utilization (%)') + plt.title(f'CPU Utilization for {instance_id}') + plt.legend() + plt.grid(True) + + # Save the graph to memory + image_buffer = io.BytesIO() + plt.savefig(image_buffer, format='png') + image_buffer.seek(0) + + # Convert to base64 for email attachment + graph_base64 = base64.b64encode(image_buffer.getvalue()).decode('utf-8') + image_buffer.close() + + # 5. Send the email via SES + email_subject = f"CPU Utilization Graph for {instance_id}" + email_body = ( + f"Attached is the CPU Utilization graph for the EC2 instance {instance_id} " + f"for the past week. \n\nBest regards,\nYour Monitoring Team" + ) + + email_html_body = ( + f"

<html><body><p>{email_body}</p><img src='data:image/png;base64,{graph_base64}'/></body></html>
" + ) + + try: + ses.send_email( + Source=email_sender, + Destination={'ToAddresses': [email_recipient]}, + Message={ + 'Subject': {'Data': email_subject}, + 'Body': { + 'Html': {'Data': email_html_body} + } + } + ) + print(f"Email sent successfully to {email_recipient}") + except ClientError as e: + print(f"Error sending email: {e}") + return {'statusCode': 500, 'body': str(e)} + + return {'statusCode': 200, 'body': 'Email sent successfully'} diff --git a/terraform/environments/ppud/lambda_scripts/send_cpu_graph_prod.py b/terraform/environments/ppud/lambda_scripts/send_cpu_graph_prod.py new file mode 100644 index 00000000000..0927365bc61 --- /dev/null +++ b/terraform/environments/ppud/lambda_scripts/send_cpu_graph_prod.py @@ -0,0 +1,135 @@ +import boto3 +import os +os.environ['MPLCONFIGDIR'] = "/tmp/graph" +import matplotlib.pyplot as plt +from datetime import datetime, timedelta +import io +import base64 +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText + +# Initialize boto3 clients +cloudwatch = boto3.client('cloudwatch') + +# Configuration +current_date = datetime.now().strftime('%a %d %b %Y') +INSTANCE_ID = "i-029d2b17679dab982" +start_time = datetime(2024, 12, 4, 8, 0, 0) +end_time = datetime(2024, 12, 4, 17, 0, 0) +SENDER = "donotreply@cjsm.secure-email.ppud.justice.gov.uk" +RECIPIENTS = ["nick.buckingham@colt.net"] +SUBJECT = f'EC2 CPU Utilization Report - {current_date}' +REGION = "eu-west-2" +IMAGE_ID = "ami-02f8251c8cdf2464f" +INSTANCE_TYPE = "m5.xlarge" + +def get_metric_data(namespace, metric_name, dimensions): + response = cloudwatch.get_metric_data( + MetricDataQueries=[ + { + 'Id': 'm1', + 'MetricStat': { + 'Metric': { + 'Namespace': namespace, + 'MetricName': metric_name, + 'Dimensions': dimensions + }, + 'Period': 300, + 'Stat': 'Maximum' + }, + 'ReturnData': True + }, + ], + StartTime=start_time, + EndTime=end_time + ) + return response['MetricDataResults'][0] + +def create_graph(cpu_data, converttopdf_data, pdfcrawler2app_data, winword_data, wmiprvse_data, createthumbnails_data): + plt.figure(figsize=(20, 5)) + plt.plot(cpu_data['Timestamps'], cpu_data['Values'], label='Total Server CPU Utilization', marker="o", linestyle="-", color="teal") + plt.plot(converttopdf_data['Timestamps'], converttopdf_data['Values'], label='Convert to PDF CPU Utilization', marker="o", linestyle="--", color="royalblue") + plt.plot(pdfcrawler2app_data['Timestamps'], pdfcrawler2app_data['Values'], label='PDF Crawler CPU Utilization', marker="o", linestyle="--", color="cyan") + plt.plot(winword_data['Timestamps'], winword_data['Values'], label='Microsoft Word CPU Utilization', marker="o", linestyle="--", color="orange") + plt.plot(wmiprvse_data['Timestamps'], wmiprvse_data['Values'], label='WMIPrvSE CPU Utilization', marker="o", linestyle="--", color="red") + plt.plot(createthumbnails_data['Timestamps'], createthumbnails_data['Values'], label='Create Thumbnails CPU Utilization', marker="o", linestyle="--", color="springgreen") + plt.xlabel('Time') + plt.ylabel('CPU Utilization (%)') + plt.title(f'EC2 CPU Utilization - {INSTANCE_ID} - {current_date}') + plt.legend() + plt.grid(True) + plt.tight_layout() + + # Save the graph to a temporary buffer + temp_file = "/tmp/cpu_utilization_graph.png" + plt.savefig(temp_file) + plt.close() + + # Read the image and encode it to base64 + with open(temp_file, "rb") as image_file: + encoded_string = base64.b64encode(image_file.read()).decode("utf-8") + + # Cleanup temporary file + os.remove(temp_file) + return encoded_string 
+ +def email_image_to_users(graph_base64): + """ + Send an email with the graph embedded in the email body using AWS SES. + """ + ses_client = boto3.client("ses", region_name=REGION) + + # Email body with the embedded image + email_body = f""" + + +

+    <html>
+    <body>
+    <p>Hi Team,</p>
+    <p>Please find below the CPU utilization metrics for EC2 instance {INSTANCE_ID} for today from 08:00 to 17:00.</p>
+    <img src="data:image/png;base64,{graph_base64}" alt="CPU Utilization Graph">
+    <p>This is an automated email.</p>
+    </body>
+    </html>

+ + + """ + + # Create the email message + msg = MIMEMultipart("alternative") + msg["From"] = SENDER + msg["To"] = ", ".join(RECIPIENTS) + msg["Subject"] = SUBJECT + + # Attach the HTML body + msg.attach(MIMEText(email_body, "html")) + + # Send the email + try: + response = ses_client.send_raw_email( + Source=SENDER, + Destinations=RECIPIENTS, + RawMessage={"Data": msg.as_string()}, + ) + print("Email sent! Message ID:", response["MessageId"]) + except Exception as e: + print("Error sending email:", e) + raise + +def lambda_handler(event, context): + cpu_data = get_metric_data('AWS/EC2', 'CPUUtilization', [{'Name': 'InstanceId', 'Value': INSTANCE_ID}]) + converttopdf_data = get_metric_data('CWAgent', 'procstat cpu_usage', [{'Name': 'InstanceId', 'Value': INSTANCE_ID}, {'Name': 'process_name', 'Value': 'converttopdf.exe'}, {'Name': 'exe', 'Value': 'converttopdf'}, {'Name': 'ImageId', 'Value': IMAGE_ID}, {'Name': 'InstanceType', 'Value': INSTANCE_TYPE}]) + pdfcrawler2app_data = get_metric_data('CWAgent', 'procstat cpu_usage', [{'Name': 'InstanceId', 'Value': INSTANCE_ID}, {'Name': 'process_name', 'Value': 'pdfcrawler2app.exe'}, {'Name': 'exe', 'Value': 'pdfcrawler2app'}, {'Name': 'ImageId', 'Value': IMAGE_ID}, {'Name': 'InstanceType', 'Value': INSTANCE_TYPE}]) + winword_data = get_metric_data('CWAgent', 'procstat cpu_usage', [{'Name': 'InstanceId', 'Value': INSTANCE_ID}, {'Name': 'process_name', 'Value': 'winword.exe'}, {'Name': 'exe', 'Value': 'winword'}, {'Name': 'ImageId', 'Value': IMAGE_ID}, {'Name': 'InstanceType', 'Value': INSTANCE_TYPE}]) + wmiprvse_data = get_metric_data('CWAgent', 'procstat cpu_usage', [{'Name': 'InstanceId', 'Value': INSTANCE_ID}, {'Name': 'process_name', 'Value': 'wmiprcse.exe'}, {'Name': 'exe', 'Value': 'wmiprcse'}, {'Name': 'ImageId', 'Value': IMAGE_ID}, {'Name': 'InstanceType', 'Value': INSTANCE_TYPE}]) + createthumbnails_data = get_metric_data('CWAgent', 'procstat cpu_usage', [{'Name': 'InstanceId', 'Value': INSTANCE_ID}, {'Name': 'process_name', 'Value': 'createthumbnails.exe'}, {'Name': 'exe', 'Value': 'createthumbnails'}, {'Name': 'ImageId', 'Value': IMAGE_ID}, {'Name': 'InstanceType', 'Value': INSTANCE_TYPE}]) + + # Create a graph and encode it as base64 + print("Creating graph...") + graph_base64 = create_graph(cpu_data, converttopdf_data, pdfcrawler2app_data, winword_data, wmiprvse_data, createthumbnails_data) + + # Send email with the graph embedded + print("Sending email...") + #email_image_to_users(graph_image.getvalue()) + email_image_to_users(graph_base64) + + return { + 'statusCode': 200, + 'body': 'Graph uploaded to S3 successfully!' + } diff --git a/terraform/environments/ppud/platform_secrets.tf b/terraform/environments/ppud/platform_secrets.tf index bb006856534..63cd8226b61 100644 --- a/terraform/environments/ppud/platform_secrets.tf +++ b/terraform/environments/ppud/platform_secrets.tf @@ -15,3 +15,23 @@ data "aws_secretsmanager_secret_version" "environment_management" { provider = aws.modernisation-platform secret_id = data.aws_secretsmanager_secret.environment_management.id } + +# Klayers Account ID - used by lambda layer ARNs - https://github.com/keithrozario/Klayers?tab=readme-ov-file +data "aws_ssm_parameter" "klayers_account_dev" { + count = local.is-development == true ? 1 : 0 + name = "klayers-account" + with_decryption = true +} + +# Klayers Account ID - used by lambda layer ARNs - https://github.com/keithrozario/Klayers?tab=readme-ov-file +data "aws_ssm_parameter" "klayers_account_prod" { + count = local.is-production == true ? 
+  name            = "klayers-account"
+  with_decryption = true
+}
+
+# This ID is the elb-account-id for eu-west-2 obtained from https://docs.aws.amazon.com/elasticloadbalancing/latest/application/enable-access-logging.html
+data "aws_ssm_parameter" "elb-account-eu-west-2" {
+  name            = "elb-account-eu-west-2"
+  with_decryption = true
+}
\ No newline at end of file
diff --git a/terraform/environments/ppud/s3.tf b/terraform/environments/ppud/s3.tf
index c27b3b03114..9439d907b4b 100644
--- a/terraform/environments/ppud/s3.tf
+++ b/terraform/environments/ppud/s3.tf
@@ -899,3 +899,259 @@ resource "aws_s3_bucket_policy" "moj-log-files-dev" {
     ]
   })
 }
+
+# S3 Bucket for Lambda Layers for Development
+
+resource "aws_s3_bucket" "moj-lambda-layers-dev" {
+  # checkov:skip=CKV_AWS_145: "S3 bucket is not public facing, does not contain any sensitive information and does not need encryption"
+  # checkov:skip=CKV_AWS_62: "S3 bucket event notification is not required"
+  # checkov:skip=CKV2_AWS_62: "S3 bucket event notification is not required"
+  # checkov:skip=CKV_AWS_144: "PPUD has a UK Sovereignty requirement so cross region replication is prohibited"
+  # checkov:skip=CKV_AWS_18: "S3 bucket logging is not required"
+  count  = local.is-development == true ? 1 : 0
+  bucket = "moj-lambda-layers-dev"
+  tags = merge(
+    local.tags,
+    {
+      Name = "${local.application_name}-moj-lambda-layers-dev"
+    }
+  )
+}
+
+resource "aws_s3_bucket_versioning" "moj-lambda-layers-dev" {
+  count  = local.is-development == true ? 1 : 0
+  bucket = aws_s3_bucket.moj-lambda-layers-dev[0].id
+  versioning_configuration {
+    status = "Enabled"
+  }
+}
+
+resource "aws_s3_bucket_public_access_block" "moj-lambda-layers-dev" {
+  count                   = local.is-development == true ? 1 : 0
+  bucket                  = aws_s3_bucket.moj-lambda-layers-dev[0].id
+  block_public_acls       = true
+  block_public_policy     = true
+  ignore_public_acls      = true
+  restrict_public_buckets = true
+}
+
+resource "aws_s3_bucket_lifecycle_configuration" "moj-lambda-layers-dev" {
+  # checkov:skip=CKV_AWS_300: "S3 bucket has a set period for aborting failed uploads, this is a false positive finding"
+  count  = local.is-development == true ? 1 : 0
+  bucket = aws_s3_bucket.moj-lambda-layers-dev[0].id
+  rule {
+    id     = "Move-to-IA-then-delete-moj-lambda-layers-dev"
+    status = "Enabled"
+    abort_incomplete_multipart_upload {
+      days_after_initiation = 7
+    }
+    noncurrent_version_transition {
+      noncurrent_days = 30
+      storage_class   = "STANDARD_IA"
+    }
+    transition {
+      days          = 30
+      storage_class = "STANDARD_IA"
+    }
+    expiration {
+      days = 60
+    }
+  }
+}
+
+resource "aws_s3_bucket_policy" "moj-lambda-layers-dev" {
+  count  = local.is-development == true ?
1 : 0 + bucket = aws_s3_bucket.moj-lambda-layers-dev[0].id + + policy = jsonencode({ + + "Version" : "2012-10-17", + "Statement" : [ + { + "Action" : [ + "s3:PutBucketNotification", + "s3:GetBucketNotification", + "s3:GetBucketAcl", + "s3:DeleteObject", + "s3:GetObject", + "s3:PutObject", + "s3:ListBucket" + ], + "Effect" : "Allow", + "Resource" : [ + "arn:aws:s3:::moj-lambda-layers-dev", + "arn:aws:s3:::moj-lambda-layers-dev/*" + ], + "Principal" : { + Service = "logging.s3.amazonaws.com" + } + }, + { + "Action" : [ + "s3:PutBucketNotification", + "s3:GetBucketNotification", + "s3:GetBucketAcl", + "s3:DeleteObject", + "s3:GetObject", + "s3:PutObject", + "s3:ListBucket" + ], + "Effect" : "Allow", + "Resource" : [ + "arn:aws:s3:::moj-lambda-layers-dev", + "arn:aws:s3:::moj-lambda-layers-dev/*" + ], + "Principal" : { + Service = "sns.amazonaws.com" + } + }, + { + "Action" : [ + "s3:GetBucketAcl", + "s3:DeleteObject", + "s3:GetObject", + "s3:PutObject", + "s3:ListBucket" + ], + "Effect" : "Allow", + "Resource" : [ + "arn:aws:s3:::moj-lambda-layers-dev", + "arn:aws:s3:::moj-lambda-layers-dev/*" + ], + "Principal" : { + "AWS" : [ + "arn:aws:iam::${local.environment_management.account_ids["ppud-development"]}:role/ec2-iam-role" + ] + } + } + ] + }) +} + +# S3 Bucket for Lambda Layers for Production + +resource "aws_s3_bucket" "moj-lambda-layers-prod" { + # checkov:skip=CKV_AWS_145: "S3 bucket is not public facing, does not contain any sensitive information and does not need encryption" + # checkov:skip=CKV_AWS_62: "S3 bucket event notification is not required" + # checkov:skip=CKV2_AWS_62: "S3 bucket event notification is not required" + # checkov:skip=CKV_AWS_144: "PPUD has a UK Sovereignty requirement so cross region replication is prohibited" + # checkov:skip=CKV_AWS_18: "S3 bucket logging is not required" + count = local.is-production == true ? 1 : 0 + bucket = "moj-lambda-layers-prod" + tags = merge( + local.tags, + { + Name = "${local.application_name}-moj-lambda-layers-prod" + } + ) +} + +resource "aws_s3_bucket_versioning" "moj-lambda-layers-prod" { + count = local.is-production == true ? 1 : 0 + bucket = aws_s3_bucket.moj-lambda-layers-prod[0].id + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_bucket_public_access_block" "moj-lambda-layers-prod" { + count = local.is-production == true ? 1 : 0 + bucket = aws_s3_bucket.moj-lambda-layers-prod[0].id + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + restrict_public_buckets = true +} + +resource "aws_s3_bucket_lifecycle_configuration" "moj-lambda-layers-prod" { + # checkov:skip=CKV_AWS_300: "S3 bucket has a set period for aborting failed uploads, this is a false positive finding" + count = local.is-production == true ? 1 : 0 + bucket = aws_s3_bucket.moj-lambda-layers-prod[0].id + rule { + id = "Move-to-IA-then-delete-moj-lambda-layers-prod" + status = "Enabled" + abort_incomplete_multipart_upload { + days_after_initiation = 7 + } + noncurrent_version_transition { + noncurrent_days = 30 + storage_class = "STANDARD_IA" + } + transition { + days = 30 + storage_class = "STANDARD_IA" + } + expiration { + days = 60 + } + } +} + +resource "aws_s3_bucket_policy" "moj-lambda-layers-prod" { + count = local.is-production == true ? 
1 : 0 + bucket = aws_s3_bucket.moj-lambda-layers-prod[0].id + + policy = jsonencode({ + + "Version" : "2012-10-17", + "Statement" : [ + { + "Action" : [ + "s3:PutBucketNotification", + "s3:GetBucketNotification", + "s3:GetBucketAcl", + "s3:DeleteObject", + "s3:GetObject", + "s3:PutObject", + "s3:ListBucket" + ], + "Effect" : "Allow", + "Resource" : [ + "arn:aws:s3:::moj-lambda-layers-prod", + "arn:aws:s3:::moj-lambda-layers-prod/*" + ], + "Principal" : { + Service = "logging.s3.amazonaws.com" + } + }, + { + "Action" : [ + "s3:PutBucketNotification", + "s3:GetBucketNotification", + "s3:GetBucketAcl", + "s3:DeleteObject", + "s3:GetObject", + "s3:PutObject", + "s3:ListBucket" + ], + "Effect" : "Allow", + "Resource" : [ + "arn:aws:s3:::moj-lambda-layers-prod", + "arn:aws:s3:::moj-lambda-layers-prod/*" + ], + "Principal" : { + Service = "sns.amazonaws.com" + } + }, + { + "Action" : [ + "s3:GetBucketAcl", + "s3:DeleteObject", + "s3:GetObject", + "s3:PutObject", + "s3:ListBucket" + ], + "Effect" : "Allow", + "Resource" : [ + "arn:aws:s3:::moj-lambda-layers-prod", + "arn:aws:s3:::moj-lambda-layers-prod/*" + ], + "Principal" : { + "AWS" : [ + "arn:aws:iam::${local.environment_management.account_ids["ppud-production"]}:role/ec2-iam-role" + ] + } + } + ] + }) +} diff --git a/terraform/environments/ppud/security_group.tf b/terraform/environments/ppud/security_group.tf index c341c214913..dfb37a7ffc7 100644 --- a/terraform/environments/ppud/security_group.tf +++ b/terraform/environments/ppud/security_group.tf @@ -416,6 +416,84 @@ resource "aws_security_group_rule" "Primary-DOC-Server-Egress-2" { } +resource "aws_security_group" "Live-DOC-Server" { + count = local.is-preproduction == false ? 1 : 0 + vpc_id = data.aws_vpc.shared.id + name = "Live-DOC-Server" + description = "Live-DOC-Server for DEV & PROD" + + tags = { + Name = "${var.networking[0].business-unit}-${local.environment}" + } +} + +resource "aws_security_group_rule" "Live-DOC-Server-Ingress" { + description = "Rule to allow port 80 traffic inbound" + count = local.is-preproduction == false ? 1 : 0 + type = "ingress" + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = [data.aws_vpc.shared.cidr_block] + security_group_id = aws_security_group.Live-DOC-Server[0].id +} + +resource "aws_security_group_rule" "Live-DOC-Server-Ingress-1" { + description = "Rule to allow port 445 traffic inbound" + count = local.is-preproduction == false ? 1 : 0 + type = "ingress" + from_port = 445 + to_port = 445 + protocol = "tcp" + cidr_blocks = [data.aws_vpc.shared.cidr_block] + security_group_id = aws_security_group.Live-DOC-Server[0].id +} + +resource "aws_security_group_rule" "Live-DOC-Server-Ingress-2" { + description = "Rule to allow port 3389 traffic inbound" + count = local.is-preproduction == false ? 1 : 0 + type = "ingress" + from_port = 3389 + to_port = 3389 + protocol = "tcp" + cidr_blocks = [data.aws_vpc.shared.cidr_block] + security_group_id = aws_security_group.Live-DOC-Server[0].id +} + +resource "aws_security_group_rule" "Live-DOC-Server-Egress" { + description = "Rule to allow all traffic outbound" + count = local.is-preproduction == false ? 1 : 0 + type = "egress" + from_port = 0 + to_port = 0 + protocol = "all" + cidr_blocks = [data.aws_vpc.shared.cidr_block] + security_group_id = aws_security_group.Live-DOC-Server[0].id +} + +resource "aws_security_group_rule" "Live-DOC-Server-Egress-1" { + description = "Rule to allow port 443 traffic outbound" + count = local.is-preproduction == false ? 
1 : 0 + type = "egress" + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + security_group_id = aws_security_group.Live-DOC-Server[0].id +} + +resource "aws_security_group_rule" "Live-DOC-Server-Egress-2" { + description = "Rule to allow port 80 traffic outbound" + count = local.is-preproduction == false ? 1 : 0 + type = "egress" + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + security_group_id = aws_security_group.Live-DOC-Server[0].id +} + + resource "aws_security_group" "Secondary-DOC-Server" { count = local.is-preproduction == false ? 1 : 0 vpc_id = data.aws_vpc.shared.id @@ -493,6 +571,83 @@ resource "aws_security_group_rule" "Secondary-DOC-Server-Egress-2" { security_group_id = aws_security_group.Secondary-DOC-Server[0].id } +resource "aws_security_group" "Archive-DOC-Server" { + count = local.is-preproduction == false ? 1 : 0 + vpc_id = data.aws_vpc.shared.id + name = "Archive-DOC-Server" + description = "Archive-DOC-Server for DEV & PROD" + + tags = { + Name = "${var.networking[0].business-unit}-${local.environment}" + } +} + +resource "aws_security_group_rule" "Archive-DOC-Server-Ingress" { + description = "Rule to allow port 80 traffic inbound" + count = local.is-preproduction == false ? 1 : 0 + type = "ingress" + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = [data.aws_vpc.shared.cidr_block] + security_group_id = aws_security_group.Archive-DOC-Server[0].id +} + +resource "aws_security_group_rule" "Archive-DOC-Server-Ingress-1" { + description = "Rule to allow port 445 traffic inbound" + count = local.is-preproduction == false ? 1 : 0 + type = "ingress" + from_port = 445 + to_port = 445 + protocol = "tcp" + cidr_blocks = [data.aws_vpc.shared.cidr_block] + security_group_id = aws_security_group.Archive-DOC-Server[0].id +} + +resource "aws_security_group_rule" "Archive-DOC-Server-Ingress-2" { + description = "Rule to allow port 3389 traffic inbound" + count = local.is-preproduction == false ? 1 : 0 + type = "ingress" + from_port = 3389 + to_port = 3389 + protocol = "tcp" + cidr_blocks = [data.aws_vpc.shared.cidr_block] + security_group_id = aws_security_group.Archive-DOC-Server[0].id +} + +resource "aws_security_group_rule" "Archive-DOC-Server-Egress" { + description = "Rule to allow all traffic outbound" + count = local.is-preproduction == false ? 1 : 0 + type = "egress" + from_port = 0 + to_port = 0 + protocol = "all" + cidr_blocks = [data.aws_vpc.shared.cidr_block] + security_group_id = aws_security_group.Archive-DOC-Server[0].id +} + +resource "aws_security_group_rule" "Archive-DOC-Server-Egress-1" { + description = "Rule to allow port 443 traffic outbound" + count = local.is-preproduction == false ? 1 : 0 + type = "egress" + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + security_group_id = aws_security_group.Archive-DOC-Server[0].id +} + +resource "aws_security_group_rule" "Archive-DOC-Server-Egress-2" { + description = "Rule to allow port 80 traffic outbound" + count = local.is-preproduction == false ? 1 : 0 + type = "egress" + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + security_group_id = aws_security_group.Archive-DOC-Server[0].id +} + resource "aws_security_group" "PPUD-Database-Server" { count = local.is-development == true ? 
1 : 0 vpc_id = data.aws_vpc.shared.id diff --git a/terraform/environments/ppud/shield.tf b/terraform/environments/ppud/shield.tf index 00708681268..4a731e41297 100644 --- a/terraform/environments/ppud/shield.tf +++ b/terraform/environments/ppud/shield.tf @@ -20,9 +20,3 @@ module "shield" { } } } - -import { - for_each = local.is-production ? { "build" = true } : {} - id = "60a72081-57ea-4a38-b04a-778796012304/FMManagedWebACLV2-shield_advanced_auto_remediate-1649415357278/REGIONAL" - to = module.shield["build"].aws_wafv2_web_acl.main -} diff --git a/terraform/environments/ppud/sns.tf b/terraform/environments/ppud/sns.tf index f7025b4817a..8e6f6133e7d 100644 --- a/terraform/environments/ppud/sns.tf +++ b/terraform/environments/ppud/sns.tf @@ -65,6 +65,7 @@ resource "aws_sns_topic_subscription" "cw_sms_subscription4" { } */ +/* resource "aws_sns_topic_policy" "sns_topic_policy_ec2cw" { count = local.is-production == true ? 1 : 0 arn = aws_sns_topic.cw_alerts[0].arn @@ -98,6 +99,7 @@ resource "aws_sns_topic_policy" "sns_topic_policy_ec2cw" { ] }) } +*/ # PreProduction - Cloud Watch diff --git a/terraform/environments/tribunals/README.md b/terraform/environments/tribunals/README.md index fe007aed4e7..ab8f96ab74f 100644 --- a/terraform/environments/tribunals/README.md +++ b/terraform/environments/tribunals/README.md @@ -74,4 +74,3 @@ Modernisation Platform ### **How to resolve specific issues:** - diff --git a/terraform/environments/tribunals/cloudfront.tf b/terraform/environments/tribunals/cloudfront.tf index 286aadd461a..3d8d5c8005f 100644 --- a/terraform/environments/tribunals/cloudfront.tf +++ b/terraform/environments/tribunals/cloudfront.tf @@ -1,17 +1,23 @@ resource "aws_cloudfront_distribution" "tribunals_distribution" { - aliases = ["*.${var.networking[0].application}.${var.networking[0].business-unit}-${local.environment}.modernisation-platform.service.justice.gov.uk"] + web_acl_id = aws_wafv2_web_acl.tribunals_web_acl.arn + + aliases = local.is-production ? 
[ + "*.decisions.tribunals.gov.uk", + "*.venues.tribunals.gov.uk", + "*.reports.tribunals.gov.uk" + ] : ["*.${var.networking[0].application}.${var.networking[0].business-unit}-${local.environment}.modernisation-platform.service.justice.gov.uk"] origin { domain_name = aws_lb.tribunals_lb.dns_name origin_id = "tribunalsOrigin" custom_origin_config { - http_port = 80 - https_port = 443 - origin_protocol_policy = "https-only" - origin_ssl_protocols = ["TLSv1.2"] + http_port = 80 + https_port = 443 + origin_protocol_policy = "https-only" + origin_ssl_protocols = ["TLSv1.2"] origin_keepalive_timeout = 60 - origin_read_timeout = 60 + origin_read_timeout = 60 } custom_header { @@ -23,7 +29,7 @@ resource "aws_cloudfront_distribution" "tribunals_distribution" { default_cache_behavior { target_origin_id = "tribunalsOrigin" - cache_policy_id = data.aws_cloudfront_cache_policy.caching_disabled.id + cache_policy_id = data.aws_cloudfront_cache_policy.caching_disabled.id origin_request_policy_id = data.aws_cloudfront_origin_request_policy.all_viewer.id viewer_protocol_policy = "redirect-to-https" diff --git a/terraform/environments/tribunals/dns-delegate-route53.tf b/terraform/environments/tribunals/dns-delegate-route53.tf index d2ec71520f1..b7a7f468fd2 100644 --- a/terraform/environments/tribunals/dns-delegate-route53.tf +++ b/terraform/environments/tribunals/dns-delegate-route53.tf @@ -1,8 +1,7 @@ locals { ec2_records = [ - "decisions", - "asylumsupport.decisions" + "decisions" ] ec2_records_migrated = [ @@ -15,7 +14,8 @@ locals { "siac.decisions", "taxandchancery_ut.decisions", "charity.decisions", - "phl.decisions" + "phl.decisions", + "asylumsupport.decisions" ] afd_records_migrated = [ @@ -32,6 +32,9 @@ locals { ] nginx_records = [ + ] + + nginx_records_pre_migration = [ "", "adjudicationpanel", "charity", @@ -97,7 +100,7 @@ resource "aws_route53_record" "afd_instances_migrated" { name = local.afd_records_migrated[count.index] type = "CNAME" ttl = 300 - records = [aws_lb.tribunals_lb.dns_name] + records = [aws_cloudfront_distribution.tribunals_distribution.domain_name] } # 'A' records for tribunals URLs routed through the NGINX reverse proxy hosted in AWS DSD Account @@ -117,6 +120,20 @@ resource "aws_route53_record" "nginx_instances" { } } +resource "aws_route53_record" "nginx_instances_pre_migration" { + count = local.is-production ? length(local.nginx_records_pre_migration) : 0 + provider = aws.core-network-services + zone_id = local.production_zone_id + name = local.nginx_records_pre_migration[count.index] + type = "A" + + alias { + name = "tribunals-nginx-1184258455.eu-west-1.elb.amazonaws.com" + zone_id = "Z32O12XQLNTSW2" + evaluate_target_health = false + } +} + # 'A' records for tribunals www. URLs redirects to existing entries - subtract the "www." resource "aws_route53_record" "www_instances" { count = local.is-production ? 
length(local.www_records) : 0 diff --git a/terraform/environments/tribunals/dns_ssl.tf b/terraform/environments/tribunals/dns_ssl.tf index cc96e8d1747..c6a506f8dfe 100644 --- a/terraform/environments/tribunals/dns_ssl.tf +++ b/terraform/environments/tribunals/dns_ssl.tf @@ -104,6 +104,11 @@ variable "services" { module_key = "transportappeals" port = 49109 }, + "asylum_support" = { + name_prefix = "asylumsupport" + module_key = "asylumsupport" + port = 49120 + }, "charity_tribunal_decisions" = { name_prefix = "charity" module_key = "charity" @@ -165,7 +170,6 @@ variable "web_app_services" { port = 49100 app_db_name = "ossc" sql_setup_path = "/db_setup_scripts/administrative_appeals" - sql_post_setup_path = "/db_post_setup_scripts/administrative_appeals" }, "ahmlr" = { name_prefix = "landregistrationdivision" @@ -173,7 +177,6 @@ variable "web_app_services" { port = 49101 app_db_name = "hmlands" sql_setup_path = "/db_setup_scripts/ahmlr" - sql_post_setup_path = "/db_post_setup_scripts/ahmlr" } "care_standards" = { name_prefix = "carestandards" @@ -181,7 +184,6 @@ variable "web_app_services" { port = 49102 app_db_name = "carestandards" sql_setup_path = "/db_setup_scripts/care_standards" - sql_post_setup_path = "/db_post_setup_scripts/care_standards" }, "cicap" = { name_prefix = "cicap" @@ -189,7 +191,6 @@ variable "web_app_services" { port = 49103 app_db_name = "cicap" sql_setup_path = "/db_setup_scripts/cicap" - sql_post_setup_path = "/db_post_setup_scripts/cicap" }, "employment_appeals" = { name_prefix = "employmentappeals" @@ -197,7 +198,6 @@ variable "web_app_services" { port = 49104 app_db_name = "eat" sql_setup_path = "/db_setup_scripts/employment_appeals" - sql_post_setup_path = "/db_post_setup_scripts/employment_appeals" }, "finance_and_tax" = { name_prefix = "financeandtax" @@ -205,7 +205,6 @@ variable "web_app_services" { port = 49105 app_db_name = "ftt" sql_setup_path = "/db_setup_scripts/finance_and_tax" - sql_post_setup_path = "/db_post_setup_scripts/finance_and_tax" }, "immigration_services" = { name_prefix = "immigrationservices" @@ -213,7 +212,6 @@ variable "web_app_services" { port = 49106 app_db_name = "imset" sql_setup_path = "/db_setup_scripts/immigration_services" - sql_post_setup_path = "/db_post_setup_scripts/immigration_services" }, "information_tribunal" = { name_prefix = "informationrights" @@ -221,7 +219,6 @@ variable "web_app_services" { port = 49107 app_db_name = "it" sql_setup_path = "/db_setup_scripts/information_tribunal" - sql_post_setup_path = "/db_post_setup_scripts/information_tribunal" }, "lands_tribunal" = { name_prefix = "landschamber" @@ -229,7 +226,6 @@ variable "web_app_services" { port = 49108 app_db_name = "lands" sql_setup_path = "/db_setup_scripts/lands_chamber" - sql_post_setup_path = "/db_post_setup_scripts/lands_chamber" }, "transport" = { name_prefix = "transportappeals" @@ -237,7 +233,13 @@ variable "web_app_services" { port = 49109 app_db_name = "transport" sql_setup_path = "/db_setup_scripts/transport" - sql_post_setup_path = "/db_post_setup_scripts/transport" + }, + "asylum_support" = { + name_prefix = "asylumsupport" + module_key = "asylum_support" + port = 49120 + app_db_name = "asadj" + sql_setup_path = "/db_setup_scripts/asylum_support" } } } @@ -309,6 +311,7 @@ locals { information_tribunal = module.information_tribunal lands_tribunal = module.lands_tribunal transport = module.transport + asylum_support = module.asylum_support charity_tribunal_decisions = module.charity_tribunal_decisions claims_management_decisions = 
module.claims_management_decisions consumer_credit_appeals = module.consumer_credit_appeals diff --git a/terraform/environments/tribunals/ec2-shared-user-data.sh b/terraform/environments/tribunals/ec2-shared-user-data.sh index c637de7bbd9..c4e736aa360 100644 --- a/terraform/environments/tribunals/ec2-shared-user-data.sh +++ b/terraform/environments/tribunals/ec2-shared-user-data.sh @@ -5,7 +5,7 @@ $targetDrive = "D" $targetPath = $targetDrive + ":\storage\tribunals\" $ecsCluster = "tribunals-all-cluster" $ebsVolumeTag = "tribunals-all-storage" -$tribunalNames = "appeals","transport","care-standards","cicap","employment-appeals","finance-and-tax","immigration-services","information-tribunal","hmlands","lands-chamber", "ftp-admin-appeals", "ftp-tax-tribunal", "ftp-tax-chancery", "ftp-sscs-venues", "ftp-siac", "ftp-primary-health", "ftp-estate-agents", "ftp-consumer-credit", "ftp-claims-management", "ftp-charity-tribunals" +$tribunalNames = "appeals","transport","care-standards","cicap","employment-appeals","finance-and-tax","immigration-services","information-tribunal","hmlands","lands-chamber", "asylum-support", "ftp-admin-appeals", "ftp-tax-tribunal", "ftp-tax-chancery", "ftp-sscs-venues", "ftp-siac", "ftp-primary-health", "ftp-estate-agents", "ftp-consumer-credit", "ftp-claims-management", "ftp-charity-tribunals" $monitorLogFile = "C:\ProgramData\Amazon\EC2-Windows\Launch\Log\monitorLogFile.log" $monitorScriptFile = "C:\ProgramData\Amazon\EC2-Windows\Launch\monitor-ebs.ps1" diff --git a/terraform/environments/tribunals/lambda.tf b/terraform/environments/tribunals/lambda.tf index 40a6aa9f976..6392b24c165 100644 --- a/terraform/environments/tribunals/lambda.tf +++ b/terraform/environments/tribunals/lambda.tf @@ -182,7 +182,6 @@ resource "aws_lambda_function" "app_post_migrate" { ADMIN_PASSWORD = jsondecode(data.aws_secretsmanager_secret_version.tribunals_admin_site_credentials_secret_current.secret_string)["admin_password"] ADMIN_PASSWORD_EAT = jsondecode(data.aws_secretsmanager_secret_version.tribunals_admin_site_credentials_secret_current.secret_string)["admin_password_eat"] NEW_DB_NAME = each.value.app_db_name - APP_FOLDER = each.value.sql_post_setup_path } } diff --git a/terraform/environments/tribunals/lambda_function/app_post_migrate.py b/terraform/environments/tribunals/lambda_function/app_post_migrate.py index 62d343a3033..59f241c6da4 100644 --- a/terraform/environments/tribunals/lambda_function/app_post_migrate.py +++ b/terraform/environments/tribunals/lambda_function/app_post_migrate.py @@ -8,7 +8,6 @@ def lambda_handler(event, context): user_name = os.getenv("USER_NAME") password = os.getenv("PASSWORD") new_db_name = os.getenv("NEW_DB_NAME") - app_folder = os.getenv("APP_FOLDER") admin_username = os.getenv("ADMIN_USERNAME") admin_password = os.getenv("ADMIN_PASSWORD") admin_password_eat = os.getenv("ADMIN_PASSWORD_EAT") diff --git a/terraform/environments/tribunals/lambda_function/app_setup_db.py b/terraform/environments/tribunals/lambda_function/app_setup_db.py index d7dddabe058..06de1d1b583 100644 --- a/terraform/environments/tribunals/lambda_function/app_setup_db.py +++ b/terraform/environments/tribunals/lambda_function/app_setup_db.py @@ -49,8 +49,14 @@ def lambda_handler(event, context): statements = re.split(r'\bGO\b', script, flags=re.IGNORECASE) for statement in statements: if statement.strip(): - cursor.execute(statement) - conn.commit() + try: + print(f"Executing statement: {statement.strip()}") + cursor.execute(statement) + conn.commit() + except Exception as e: + 
print(f"Error executing statement: {e}") + print(f"SQL Statement: {statement.strip()}") + break # Exit on error # Closing the connection cursor.close() diff --git a/terraform/environments/tribunals/lambda_function/db_setup_deployment_package.zip b/terraform/environments/tribunals/lambda_function/db_setup_deployment_package.zip index 132d0ab0e7c..32e4424fdc2 100644 Binary files a/terraform/environments/tribunals/lambda_function/db_setup_deployment_package.zip and b/terraform/environments/tribunals/lambda_function/db_setup_deployment_package.zip differ diff --git a/terraform/environments/tribunals/lambda_function/db_setup_scripts/asylum_support/sp_migration.sql b/terraform/environments/tribunals/lambda_function/db_setup_scripts/asylum_support/sp_migration.sql new file mode 100644 index 00000000000..8a5c0994643 --- /dev/null +++ b/terraform/environments/tribunals/lambda_function/db_setup_scripts/asylum_support/sp_migration.sql @@ -0,0 +1,1145 @@ +use asadj +GO +/****** Object: StoredProcedure [dbo].[spSearchDecisionFinal] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spSearchDecisionFinal] + + @CurrentPage int, + @PageSize int, + @TotalRecords int output, + @strWhere nVarChar(4000) + AS +-- Create a temp table to hold the current page of data +-- Add and ID Column to count the decisions +Declare @sql As nVarChar(4000) +Select @sql = 'Create TABLE #TempTable +( +ID int PRIMARY KEY, +FirstName varchar(250), +LastName varchar(250), +DecisionDate datetime, +Ref1 varchar(100), +Ref2 varchar(100), +Ref3 varchar(100), +Ref4 varchar(100), +Landmark bit, CatDescription varChar(200), SubDescription varchar(200) +) +-- Fill the temp table with Decisions Data +INSERT INTO #TempTable +( +ID, +FirstName, +LastName, +DecisionDate, +Ref1, +Ref2, +Ref3, +Ref4, +Landmark, CatDescription, SubDescription +) +SELECT d.id AS ID , FirstName, LastName, DecisionDate, + Ref1, Ref2, Ref3, Ref4, Landmark, CatDescription, SubDescription +FROM decisions d INNER JOIN +category cat ON (d.CatID= cat.CatID) INNER JOIN + subcategory s ON (d.SubCatID =s.SubCatID) + WHERE 1 = 1 +ORDER BY d.DecisionDate DESC +-- Create variable to identify the first and last record that should be selected +DECLARE @FirstRec int, @LastRec int +SELECT @FirstRec = (@CurrentPage - 1) * @PageSize +SELECT @LastRec = (@CurrentPage * @PageSize + 1) + +Select +ID,FirstName,LastName,DecisionDate,Ref1,Ref2,Ref3,Ref4,Landmark, CatDescription, SubDescription +From #TempTable +WHERE +ID > @FirstRec +AND +ID < @LastRec + +SELECT @TotalRecords = COUNT(*) FROM decisions inner join adjudicator ON decisions.AdjID=adjudicator.AdjID' + /* SET NOCOUNT ON */ + EXEC sp_executesql @sql,@strWhere +GO +/****** Object: Table [dbo].[USERS] Script Date: 11/25/2024 14:26:09 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +SET ANSI_PADDING ON +GO +CREATE TABLE [dbo].[USERS]( + [UserID] [int] IDENTITY(1,1) NOT NULL, + [Firstname] [varchar](250) NULL, + [Lastname] [varchar](250) NULL, + [Username] [varchar](250) NULL, + [Password] [varchar](250) NULL, + CONSTRAINT [PK_USERS1] PRIMARY KEY CLUSTERED +( + [UserID] ASC +)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY] +) ON [PRIMARY] +GO +SET ANSI_PADDING OFF +GO +/****** Object: Table [dbo].[decisions] Script Date: 11/25/2024 14:26:09 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +SET ANSI_PADDING ON +GO +CREATE TABLE [dbo].[decisions]( + [ID] [int] 
IDENTITY(1,1) NOT NULL, + [AdjID] [int] NULL, + [CatID] [int] NULL, + [SubCatID] [int] NULL, + [DecisionDate] [datetime] NULL, + [Ref1] [varchar](200) NULL, + [Ref2] [varchar](200) NULL, + [Ref3] [varchar](200) NULL, + [Ref4] [varchar](200) NULL, + [Landmark] [bit] NULL, + [SubmitBy] [int] NULL, + [Summary] [ntext] NULL, + [Document] [image] NULL, + [DocType] [varchar](50) NULL, + [Doc_name] [varchar](200) NULL, + CONSTRAINT [PK_decisions1] PRIMARY KEY CLUSTERED +( + [ID] ASC +)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY] +) ON [PRIMARY] TEXTIMAGE_ON [PRIMARY] +GO +SET ANSI_PADDING OFF +GO +/****** Object: Table [dbo].[category] Script Date: 11/25/2024 14:26:09 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +SET ANSI_PADDING ON +GO +CREATE TABLE [dbo].[category]( + [CatID] [int] IDENTITY(1,1) NOT NULL, + [CatDescription] [varchar](200) NULL, + CONSTRAINT [PK_category] PRIMARY KEY CLUSTERED +( + [CatID] ASC +)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY] +) ON [PRIMARY] +GO +SET ANSI_PADDING OFF +GO +/****** Object: Table [dbo].[adjudicator] Script Date: 11/25/2024 14:26:09 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +SET ANSI_PADDING ON +GO +CREATE TABLE [dbo].[adjudicator]( + [AdjID] [int] IDENTITY(1,1) NOT NULL, + [FirstName] [varchar](300) NULL, + [LastName] [varchar](300) NULL +) ON [PRIMARY] +GO +SET ANSI_PADDING OFF +GO +/****** Object: Table [dbo].[FEEDBACK] Script Date: 11/25/2024 14:26:09 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +SET ANSI_PADDING ON +GO +CREATE TABLE [dbo].[FEEDBACK]( + [ID] [int] IDENTITY(1,1) NOT NULL, + [Question1] [varchar](200) NULL, + [Question2] [varchar](200) NULL, + [Question3] [varchar](200) NULL, + [Question4] [bit] NULL, + [Question5] [ntext] NULL, + [Question6] [datetime] NULL, + [Question7] [varchar](200) NULL, + [Question8] [varchar](200) NULL, + [Question9] [varchar](200) NULL, + [Question10] [varchar](200) NULL, + CONSTRAINT [PK_FEEDBACK] PRIMARY KEY CLUSTERED +( + [ID] ASC +)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY] +) ON [PRIMARY] TEXTIMAGE_ON [PRIMARY] +GO +SET ANSI_PADDING OFF +GO +/****** Object: StoredProcedure [dbo].[spAddCategory] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +-- Adds a new category into the category table + +CREATE PROCEDURE [dbo].[spAddCategory] + +@CatID int OUTPUT, +@CatDescription varchar(200) + +AS INSERT INTO category (CatDescription) + +VALUES (@CatDescription) + +SELECT @CatID = SCOPE_IDENTITY() +GO +/****** Object: StoredProcedure [dbo].[spAddAjudicator1] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spAddAjudicator1] + +@FirstName varchar(300), +@LastName varchar(300) + +AS + +INSERT + adjudicator + ( + FirstName, + LastName + ) + +VALUES + ( + @FirstName, + @LastName + ) +GO +/****** Object: StoredProcedure [dbo].[spAddAdjudicator] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +-- adds an adjudicator to the adjudicator table by entering a firstname an a lastname + +CREATE PROCEDURE [dbo].[spAddAdjudicator] +@AdjID int OUTPUT, +@Firstname varchar(50), +@Lastname varchar(50) + +AS INSERT INTO adjudicator (Firstname, Lastname) + +VALUES 
(@Firstname, @Lastname) + +SELECT @AdjID = SCOPE_IDENTITY() +GO +/****** Object: StoredProcedure [dbo].[spCategoryList] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +-- calls all categories from the category table, used for category dropdownlists etc + +CREATE PROC [dbo].[spCategoryList] AS +SELECT CatID, CatDescription FROM category +Order By CatID +GO +/****** Object: StoredProcedure [dbo].[spAdjudicator] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +-- selects firsname and lastname of adjudicators from adjudicator tabel, will be used for dropdownlists etc + +CREATE PROCEDURE [dbo].[spAdjudicator] AS +SELECT AdjID,Firstname, Lastname +FROM adjudicator +GO +/****** Object: StoredProcedure [dbo].[spAddUser] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spAddUser] + +@UserID integer OUTPUT, +@Username varchar(50), +@Password varchar(50), +@Firstname varchar(50), +@Lastname varchar(50) + +AS + +INSERT + Users + + ( + Username, + [Password], + Firstname, + Lastname + ) + +VALUES + + ( + @Username, + @Password, + @Firstname, + @Lastname + ) + +SELECT @UserID = SCOPE_IDENTITY() +GO +/****** Object: StoredProcedure [dbo].[spDeleteDecision] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +-- deletes a decision from the decisions table + +CREATE PROCEDURE [dbo].[spDeleteDecision] + +@ID int OUTPUT, +@AdjID int, +@CatID int, +@SubCatID int, +@DecisionDate datetime, +@Ref1 varchar(10), +@Ref2 varchar(10), +@Ref3 varchar(10), +@Ref4 varchar(10), +@Landmark bit, +@SubmitBy varchar(100) + +AS DELETE FROM decisions + +WHERE [ID] = @ID +GO +/****** Object: StoredProcedure [dbo].[spDeleteCategory] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +-- deletes a category from the category table + +CREATE PROCEDURE [dbo].[spDeleteCategory] + +@CatID int OUTPUT, +@CatDescription varchar(200) + +AS DELETE FROM category + +WHERE CatID = @CatID + +SELECT @CatID = SCOPE_IDENTITY() +GO +/****** Object: StoredProcedure [dbo].[spDeleteAdjudicator] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +-- deletes an adjudicator form the adjudicator table + +CREATE PROCEDURE [dbo].[spDeleteAdjudicator] + +@AdjID int OUTPUT, +@Firstname varchar(50), +@Lastname varchar(50) + +AS DELETE FROM adjudicator + +WHERE AdjID = @AdjID + +SELECT @AdjID = SCOPE_IDENTITY() +GO +/****** Object: StoredProcedure [dbo].[spGetCategoryList] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spGetCategoryList] +AS +SELECT [CatID],[CatDescription] +FROM category +ORDER BY CatID +GO +/****** Object: StoredProcedure [dbo].[spDeleteUser] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spDeleteUser] + +@UserID int + +AS + +DECLARE +@Count int + +SELECT @Count = COUNT(*) FROM Users + +IF @Count > 1 + BEGIN + DELETE + Users + + WHERE + UserID = @UserID + END +GO +/****** Object: StoredProcedure [dbo].[spSearchDecision] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER OFF +GO +CREATE PROCEDURE [dbo].[spSearchDecision] + + @CurrentPage int, + @PageSize int, + @TotalRecords int output + AS +-- Create a temp table to hold the current page of data +-- Add and ID Column to count the 
decisions + +Create TABLE #TempTable +( +ID int PRIMARY KEY, +FirstName varchar(250), +LastName varchar(250), +DecisionDate datetime, +Ref1 varchar(100), +Ref2 varchar(100), +Ref3 varchar(100), +Ref4 varchar(100), +Landmark bit, +UserFName varchar(250), +UserLName varchar(250) +) +-- Fill the temp table with Decisions Data +INSERT INTO #TempTable +( +ID, +FirstName, +LastName, +DecisionDate, +Ref1, +Ref2, +Ref3, +Ref4, +Landmark, +UserFName, +UserLName +) +SELECT ID, +adjudicator.FirstName,adjudicator.LastName, DecisionDate,Ref1,Ref2,Ref3,Ref4,Landmark,Users.FirstName As UserFName,Users.LastName As UserLName +From decisions inner join adjudicator ON (decisions.AdjID=adjudicator.AdjID) inner join Users ON (decisions.SubmitBy=Users.UserID) +-- Create variable to identify the first and last record that should be selected +DECLARE @FirstRec int, @LastRec int +SELECT @FirstRec = (@CurrentPage - 1) * @PageSize +SELECT @LastRec = (@CurrentPage * @PageSize + 1) + +-- Select one page of data based on the record numbers above +Select +ID,FirstName,LastName,DecisionDate,Ref1,Ref2,Ref3,Ref4,Landmark,UserFName,UserLName +From #TempTable +WHERE +ID > @FirstRec +AND +ID < @LastRec +-- Return the total number of records availbale as an output parameter +SELECT @TotalRecords = COUNT(*) FROM decisions inner join adjudicator ON (decisions.AdjID=adjudicator.AdjID) inner join Users ON (decisions.SubmitBy=Users.UserID) + /* SET NOCOUNT ON */ +GO +/****** Object: StoredProcedure [dbo].[spLoginUser] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spLoginUser] + +@Username varchar(50), +@Password varchar(50) + +AS + +SELECT + * + +FROM + Users + +WHERE + Username = @Username + +AND + [Password] = @Password +GO +/****** Object: StoredProcedure [dbo].[spLandmarkDecisions] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS OFF +GO +SET QUOTED_IDENTIFIER OFF +GO +CREATE PROCEDURE [dbo].[spLandmarkDecisions] AS +SELECT ID, +FirstName,LastName, CONVERT(char(24),DecisionDate,101) AS DecisionDate,Ref1,Ref2,Ref3,Ref4, +'Landmark' = CASE Landmark WHEN 1 THEN 'Landmark' +WHEN 0 THEN ' ' END +From decisions inner join adjudicator ON decisions.AdjID=adjudicator.AdjID +WHERE landmark = 1 +GO +/****** Object: StoredProcedure [dbo].[spLandmark] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spLandmark] + + @CurrentPage int, + @PageSize int, + @TotalRecords int output + AS +-- Create a temp table to hold the current page of data +-- Add and ID Column to count the decisions + +Create TABLE #TempTable +( +ID int PRIMARY KEY, +FirstName varchar(250), +LastName varchar(250), +DecisionDate datetime, +Ref1 varchar(100), +Ref2 varchar(100), +Ref3 varchar(100), +Ref4 varchar(100), +Landmark bit +) +-- Fill the temp table with Decisions Data +INSERT INTO #TempTable +( +ID, +FirstName, +LastName, +DecisionDate, +Ref1, +Ref2, +Ref3, +Ref4, +Landmark +) + + +SELECT ID, +FirstName,LastName, DecisionDate,Ref1,Ref2,Ref3,Ref4,Landmark +From decisions inner join adjudicator ON decisions.AdjID=adjudicator.AdjID WHERE landmark = 1 +-- Create variable to identify the first and last record that should be selected +DECLARE @FirstRec int, @LastRec int +SELECT @FirstRec = (@CurrentPage - 1) * @PageSize +SELECT @LastRec = (@CurrentPage * @PageSize + 1) + +-- Select one page of data based on the record numbers above +Select +ID,FirstName,LastName,DecisionDate,Ref1,Ref2,Ref3,Ref4,Landmark +From #TempTable 
+WHERE +ID > @FirstRec +AND +ID < @LastRec +-- Return the total number of records availbale as an output parameter +SELECT @TotalRecords = COUNT(*) FROM decisions inner join adjudicator ON decisions.AdjID=adjudicator.AdjID + /*SET NOCOUNT ON*/ +GO +/****** Object: StoredProcedure [dbo].[spInsertDecision] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +-- inserts a decision into the decisions table + +CREATE PROCEDURE [dbo].[spInsertDecision] + +@ID int OUTPUT, +@AdjudicatorID int, +@CategoryID int, +@SubCategoryID int, +@decision_date datetime, +@Ref1 varchar(50), +@Ref2 varchar(50), +@Ref3 varchar(50), +@Ref4 varchar(50), +@Landmark bit, +@SubmitID int, +@Summary ntext, +@Document image = Null, +@DocType varChar(50)='', +@Doc_name varChar(200)='' +--@SubmitBy varchar(100) + +AS +BEGIN TRANSACTION +INSERT INTO decisions (AdjID, CatID, SubCatID, DecisionDate, Ref1, Ref2, Ref3, Ref4, Landmark, +SubmitBy, Summary,Document,DocType,Doc_name) + +VALUES (@AdjudicatorID, +@CategoryID, +@SubCategoryID, +@decision_date, @Ref1, @Ref2, @Ref3, @Ref4, @Landmark, @SubmitID,@Summary,@Document,@DocType,@Doc_name +) +IF @@ERROR <>0 +BEGIN +ROLLBACK TRANSACTION +RETURN 1 +END + +SELECT @ID = SCOPE_IDENTITY() + +COMMIT TRANSACTION +GO +/****** Object: StoredProcedure [dbo].[spGetUserList] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE Procedure [dbo].[spGetUserList] +As +Select UserID,Username,[Password],Firstname, Lastname +FROM USERS +Order By Firstname +GO +/****** Object: StoredProcedure [dbo].[spGetUser] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spGetUser] + +@UserID int + +AS + +SELECT + UserID, Username, [Password], Firstname, Lastname + +FROM + Users + +WHERE + UserID = @UserID +GO +/****** Object: Table [dbo].[subcategory] Script Date: 11/25/2024 14:26:09 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +SET ANSI_PADDING ON +GO +CREATE TABLE [dbo].[subcategory]( + [SubCatID] [int] IDENTITY(1,1) NOT NULL, + [CatID] [int] NULL, + [SubDescription] [varchar](300) NULL, + [num] [int] NULL, + CONSTRAINT [PK_subcategory] PRIMARY KEY CLUSTERED +( + [SubCatID] ASC +)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY] +) ON [PRIMARY] +GO +SET ANSI_PADDING OFF +GO +/****** Object: StoredProcedure [dbo].[spUpdateUser] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spUpdateUser] + +@UserID int, +@Username varchar(50), +@Password varchar(50), +@Firstname varchar(50), +@Lastname varchar(50) + +AS + +UPDATE + Users + +SET + Username = @Username, + Firstname = @Firstname, + Lastname = @Lastname + +WHERE + UserID = @UserID + +-- Update the password ONLY if provided +IF @Password IS NOT NULL AND 0 < LEN(@Password) + UPDATE Users + SET [Password] = @Password + WHERE UserID = @UserID +GO +/****** Object: StoredProcedure [dbo].[spUpdateDecision] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER OFF +GO +-- updates a decision within the decisions table + +CREATE PROCEDURE [dbo].[spUpdateDecision] + +@ID int, +@AdjudicatorID int, +@CategoryID int, +@SubCategoryID int, +@decision_date datetime, +@Ref1 varchar(50), +@Ref2 varchar(50), +@Ref3 varchar(50), +@Ref4 varchar(50), +@Landmark bit, +@SubmitID int, +@Summary ntext, +@Document image = Null, 
+@DocType varChar(50)='', +@Doc_name varChar(200)='' + +AS UPDATE decisions SET + +--[ID]=@ID, +AdjID=@AdjudicatorID, +CatID=@CategoryID, +SubCatID=@SubCategoryID, +DecisionDate=@decision_date, +Ref1=@Ref1, +Ref2=@Ref2, +Ref3=@Ref3, +Ref4=@Ref4, +Landmark=@Landmark, +SubmitBy=@SubmitID, +Summary = @Summary, +Document = @Document, +DocType = @DocType, +Doc_name = @Doc_name + +WHERE [ID]=@ID +GO +/****** Object: StoredProcedure [dbo].[spUpdateCategory] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +-- updates categories within the category table + +CREATE PROCEDURE [dbo].[spUpdateCategory] + +@CatID int, +@CatDescription varchar(200) + +AS UPDATE category + +SET CatDescription = @CatDescription + +WHERE CatID = @CatID +GO +/****** Object: StoredProcedure [dbo].[spUpdateAdjudicator] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +-- updates adjudicators firstname and lastname within the adjudicators table + +CREATE PROCEDURE [dbo].[spUpdateAdjudicator] + +@AdjID int, +@FirstName varchar(50), +@LastName varchar(50) + +AS UPDATE adjudicator + +SET FirstName = @FirstName, LastName = @LastName + +WHERE AdjID = @AdjID +GO +/****** Object: StoredProcedure [dbo].[spSearchDecisionAllCriteria] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spSearchDecisionAllCriteria] + @CurrentPage int, + @PageSize int, + @Ref1 Varchar(200), + @Ref2 Varchar(200), + @Ref3 Varchar(200), + @Ref4 Varchar(200), + --@FromDate datetime, + --@EndDate datetime, + @CatID int, + @SubCatID int, + @AdjId int, + @TotalRecords int output + AS +-- Create a temp table to hold the current page of data +-- Add and ID Column to count the decisions + +Create TABLE #TempTable +( +ID int PRIMARY KEY, +FirstName varchar(250), +LastName varchar(250), +DecisionDate datetime, +Ref1 varchar(100), +Ref2 varchar(100), +Ref3 varchar(100), +Ref4 varchar(100), +Landmark bit, +CatDescription varchar(200), +SubDescription varchar(200) +) +-- Fill the temp table with Decisions Data +INSERT INTO #TempTable +( +ID, +FirstName, +LastName, +DecisionDate, +Ref1, +Ref2, +Ref3, +Ref4, +Landmark, +CatDescription, +SubDescription +) + + +SELECT ID, +FirstName,LastName, DecisionDate,Ref1,Ref2,Ref3,Ref4,Landmark,CatDescription,SubDescription +From decisions As d inner join adjudicator As adj ON (d.AdjID=adj.AdjID) +inner join category As cat ON (d.CatID= cat.CatID) +inner join subcategory As s ON (d.SubCatID =s.SubCatID) +WHERE Ref1 = @Ref1 OR Ref2 = @Ref2 OR Ref3 =@Ref3 OR Ref4 = @Ref4 OR d.CatID = @CatID OR +d.AdjID = @AdjID + +--DecisionDate Between @FromDate AND @EndDate OR +-- Create variable to identify the first and last record that should be selected +DECLARE @FirstRec int, @LastRec int +SELECT @FirstRec = (@CurrentPage - 1) * @PageSize +SELECT @LastRec = (@CurrentPage * @PageSize + 1) + +-- Select one page of data based on the record numbers above +Select +ID,FirstName,LastName,DecisionDate,Ref1,Ref2,Ref3,Ref4,Landmark,CatDescription,SubDescription +From #TempTable +WHERE +ID > @FirstRec +AND +ID < @LastRec +-- Return the total number of records availbale as an output parameter +SELECT @TotalRecords = COUNT(*) From decisions As d inner join adjudicator As adj ON (d.AdjID=adj.AdjID) +inner join category As cat ON (d.CatID= cat.CatID) +inner join subcategory As s ON (d.SubCatID =s.SubCatID) +WHERE Ref1 = @Ref1 OR Ref2 = @Ref2 OR Ref3 =@Ref3 OR Ref4 = @Ref4 OR d.CatID = @CatID OR +d.AdjID = 
@AdjID + /* SET NOCOUNT ON */ + --OR (DecisionDate Between @FromDate AND @EndDate) +GO +/****** Object: StoredProcedure [dbo].[spUpdateSubCategory] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +-- updates the name of a sub category within a category + +CREATE PROCEDURE [dbo].[spUpdateSubCategory] + +@SubCatID int, +@SubDescription varchar(300) + +AS UPDATE subcategory + +SET SubDescription = @SubDescription + +WHERE SubCatID = @SubCatID +GO +/****** Object: StoredProcedure [dbo].[spGetSubCategoryListByCategory] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spGetSubCategoryListByCategory] + +@CategoryId int + +AS + +SELECT SubcatID, s.CatID,c.CatID, s.num, s.SubDescription, c.CatDescription as categoryname +FROM subcategory s +inner join category c on s.CatID = c.CatID +WHERE s.CatID = @CategoryId +ORDER BY s. num +GO +/****** Object: StoredProcedure [dbo].[spGetSubCategoryList] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spGetSubCategoryList] + +AS + +SELECT SubcatID, CatID, num, [SubDescription] +FROM subcategory +ORDER BY CatID, num +GO +/****** Object: StoredProcedure [dbo].[spGetSubCategoryByCat] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spGetSubCategoryByCat] + +@CatID int + +AS +SELECT sc.SubDescription, sc.SubCatID,sc.CatID, num +FROM subcategory AS sc INNER JOIN category AS c +ON sc.CatID = c.CatID +WHERE sc.CatID = @CatID +GO +/****** Object: StoredProcedure [dbo].[spGetSubCategory] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +Create Procedure [dbo].[spGetSubCategory] +AS +Select SubDescription +FROM subcategory +ORDER BY catID,subcatID +GO +/****** Object: StoredProcedure [dbo].[spGetDecisionForIndex] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spGetDecisionForIndex] +@CatId int, +@SubCatId int + AS +Select d.ID,Ref1,Ref2,Ref3,Ref4,FirstName,LastName,DecisionDate,cat.CatDescription, s.SubDescription,Landmark,Summary + From decisions As d +inner join adjudicator As adj on (d.AdjID = adj.AdjID) + inner join category As cat ON (d.CatID= cat.CatID) + inner join subcategory As s ON (d.SubCatID =s.SubCatID) +WHERE cat.CatID = @CatId AND s.SubCatID = @SubCatId +GO +/****** Object: StoredProcedure [dbo].[spGetDecisionAllIncludingLandmarkWithoutID] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spGetDecisionAllIncludingLandmarkWithoutID] +---This sp returns all the decision details including the users who entered the result, this sp also changes the bit 0 or 1 to Landmark string, without a ID parameter + AS +Select d.ID, d.AdjID,d.SubmitBy,Ref1,Ref2,Ref3,Ref4,adj.FirstName As AdjFirstName,adj.LastName As AdjLastName,CONVERT(char(24),DecisionDate,101)AS DecisionDate,d.CatID,d.SubCatID,cat.CatDescription, s.SubDescription,Summary,Users.FirstName As UFirstName,Users.LastName As ULastName, +'Landmark'= +CASE Landmark WHEN 1 THEN 'Landmark' +WHEN 0 THEN 'NO Landmark ' END + From decisions As d +inner join adjudicator As adj on (d.AdjID = adj.AdjID) + inner join category As cat ON (d.CatID= cat.CatID) + inner join subcategory As s ON (d.SubCatID =s.SubCatID)inner join + USERS ON (d.SubmitBy = USERS.UserID) +GO +/****** Object: 
StoredProcedure [dbo].[spGetDecisionAllIncludingLandmark] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spGetDecisionAllIncludingLandmark] +---This sp returns all the decision details including the users who entered the result, this sp also changes the bit 0 or 1 to Landmark string +@DecisionId int + AS +Select d.ID, d.AdjID,d.SubmitBy,Ref1,Ref2,Ref3,Ref4,adj.FirstName As AdjFirstName,adj.LastName As AdjLastName,CONVERT(char(24),DecisionDate,101)AS DecisionDate,d.CatID,d.SubCatID,cat.CatDescription, s.SubDescription,Summary,Users.FirstName As UFirstName,Users.LastName As ULastName, +'Landmark'= +CASE Landmark WHEN 1 THEN 'Landmark' +WHEN 0 THEN 'NO Landmark ' END + From decisions As d +inner join adjudicator As adj on (d.AdjID = adj.AdjID) + inner join category As cat ON (d.CatID= cat.CatID) + inner join subcategory As s ON (d.SubCatID =s.SubCatID)inner join + USERS ON (d.SubmitBy = USERS.UserID) +WHERE id=@DecisionId +GO +/****** Object: StoredProcedure [dbo].[spGetDecisionAll] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spGetDecisionAll] +---This sp returns all the decision details including the users who entered the result +@DecisionId int + AS +Select d.ID, d.AdjID,d.SubmitBy,Ref1,Ref2,Ref3,Ref4,adj.FirstName As AdjFirstName,adj.LastName As AdjLastName,DecisionDate,d.CatID,d.SubCatID,cat.CatDescription, s.SubDescription,Landmark,Summary,Users.FirstName As UFirstName,Users.LastName As ULastName + From decisions As d +inner join adjudicator As adj on (d.AdjID = adj.AdjID) + inner join category As cat ON (d.CatID= cat.CatID) + inner join subcategory As s ON (d.SubCatID =s.SubCatID)inner join + USERS ON (d.SubmitBy = USERS.UserID) +WHERE id=@DecisionId +GO +/****** Object: StoredProcedure [dbo].[spGetDecision] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +---Added by Osai to Get a Decision's details based on Id +--- The result is used to populate the Edit page for Update + +CREATE PROCEDURE [dbo].[spGetDecision] +@DecisionId int + AS +Select d.ID, d.AdjID,d.SubmitBy,Ref1,Ref2,Ref3,Ref4,FirstName,LastName,DecisionDate,d.CatID,d.SubCatID,cat.CatDescription, s.SubDescription,Landmark,Summary + From decisions As d +inner join adjudicator As adj on (d.AdjID = adj.AdjID) + inner join category As cat ON (d.CatID= cat.CatID) + inner join subcategory As s ON (d.SubCatID =s.SubCatID) +WHERE id=@DecisionId +GO +/****** Object: StoredProcedure [dbo].[spDeleteSubCategory] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spDeleteSubCategory] + +@Id int + +AS + +DELETE + SubCategory + +WHERE + SubCatID = @Id +GO +/****** Object: Table [dbo].[decisions2] Script Date: 11/25/2024 14:26:09 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +SET ANSI_PADDING ON +GO +CREATE TABLE [dbo].[decisions2]( + [ID] [int] IDENTITY(1,1) NOT NULL, + [AdjID] [int] NULL, + [CatID] [int] NULL, + [SubCatID] [int] NULL, + [DecisionDate] [datetime] NULL, + [Ref1] [varchar](200) NULL, + [Ref2] [varchar](200) NULL, + [Ref3] [varchar](200) NULL, + [Ref4] [varchar](200) NULL, + [Landmark] [bit] NULL, + [SubmitBy] [int] NULL, + [Summary] [ntext] NULL, + [Document] [image] NULL, + [DocType] [varchar](50) NULL, + [Doc_name] [varchar](200) NULL, + CONSTRAINT [PK_decisions] PRIMARY KEY CLUSTERED +( + [ID] ASC +)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, 
IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY] +) ON [PRIMARY] TEXTIMAGE_ON [PRIMARY] +GO +SET ANSI_PADDING OFF +GO +/****** Object: StoredProcedure [dbo].[spCat-SubCat] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +-- returns the sub category where the subcatID = the catID from different tables + +CREATE PROCEDURE [dbo].[spCat-SubCat] + +@CatID int + +AS +SELECT sc.SubDescription +FROM subcategory AS sc INNER JOIN category AS c +ON sc.CatID = c.CatID +WHERE sc.CatID = @CatID +GO +/****** Object: StoredProcedure [dbo].[spAddSubCategory] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[spAddSubCategory] + +@description varchar(300), +@CatID int, +@num int + +AS + +INSERT + Subcategory + ( + CatID, + SubDescription, + num + ) + +VALUES + ( + @CatID, + @description, + @num + ) +GO +/****** Object: StoredProcedure [dbo].[GetCategoryAndSubCategory] Script Date: 11/25/2024 14:26:10 ******/ +SET ANSI_NULLS ON +GO +SET QUOTED_IDENTIFIER ON +GO +CREATE PROCEDURE [dbo].[GetCategoryAndSubCategory] +/* + ( + @parameter1 datatype = default value, + @parameter2 datatype OUTPUT + ) +*/ +AS + select c.CatID As pnum,CatDescription,sc.SubDescription,sc.num +FROM +subcategory AS sc INNER JOIN category AS c +ON sc.CatID = c.CatID + +Group By c.CatID,c.CatDescription,sc.SubDescription,sc.num +GO +/****** Object: ForeignKey [FK_decisions_category] Script Date: 11/25/2024 14:26:09 ******/ +ALTER TABLE [dbo].[decisions2] WITH NOCHECK ADD CONSTRAINT [FK_decisions_category] FOREIGN KEY([CatID]) +REFERENCES [dbo].[category] ([CatID]) +GO +ALTER TABLE [dbo].[decisions2] CHECK CONSTRAINT [FK_decisions_category] +GO +/****** Object: ForeignKey [FK_decisions_subcategory] Script Date: 11/25/2024 14:26:09 ******/ +ALTER TABLE [dbo].[decisions2] WITH NOCHECK ADD CONSTRAINT [FK_decisions_subcategory] FOREIGN KEY([SubCatID]) +REFERENCES [dbo].[subcategory] ([SubCatID]) +GO +ALTER TABLE [dbo].[decisions2] CHECK CONSTRAINT [FK_decisions_subcategory] +GO +/****** Object: ForeignKey [FK_subcategory_category] Script Date: 11/25/2024 14:26:09 ******/ +ALTER TABLE [dbo].[subcategory] WITH CHECK ADD CONSTRAINT [FK_subcategory_category] FOREIGN KEY([CatID]) +REFERENCES [dbo].[category] ([CatID]) +GO +ALTER TABLE [dbo].[subcategory] CHECK CONSTRAINT [FK_subcategory_category] +GO diff --git a/terraform/environments/tribunals/load_balancer.tf b/terraform/environments/tribunals/load_balancer.tf index 79c4debc3b7..99825a46efa 100644 --- a/terraform/environments/tribunals/load_balancer.tf +++ b/terraform/environments/tribunals/load_balancer.tf @@ -3,8 +3,32 @@ locals { # Create a mapping between listener headers and target group ARNs listener_header_to_target_group = { - for k, v in var.services : - v.name_prefix => aws_lb_target_group.tribunals_target_group[k].arn + for k, v in var.services : v.name_prefix => ( + aws_lb_target_group.tribunals_target_group[k].arn + ) + } + service_priorities = { + adminappeals = 1 + administrativeappeals = 2 + carestandards = 3 + charity = 4 + cicap = 5 + claimsmanagement = 6 + consumercreditappeals = 7 + employmentappeals = 8 + estateagentappeals = 9 + financeandtax = 10 + immigrationservices = 11 + informationrights = 12 + landregistrationdivision = 13 + landschamber = 14 + phl = 15 + siac = 16 + sscs = 17 + tax = 18 + taxandchancery_ut = 19 + transportappeals = 20 + asylumsupport = 21 } } @@ -110,8 +134,7 @@ resource "aws_lb_listener_rule" 
"tribunals_lb_rule" { for_each = local.listener_header_to_target_group listener_arn = aws_lb_listener.tribunals_lb.arn - priority = index(keys(local.listener_header_to_target_group), each.key) + 1 - + priority = local.service_priorities[each.key] action { type = "forward" target_group_arn = each.value @@ -123,8 +146,3 @@ resource "aws_lb_listener_rule" "tribunals_lb_rule" { } } } - -resource "aws_wafv2_web_acl_association" "web_acl_association_my_lb" { - resource_arn = aws_lb.tribunals_lb.arn - web_acl_arn = aws_wafv2_web_acl.tribunals_web_acl.arn -} diff --git a/terraform/environments/tribunals/main.tf b/terraform/environments/tribunals/main.tf index 4cec5cdfe0d..f38e84aecde 100644 --- a/terraform/environments/tribunals/main.tf +++ b/terraform/environments/tribunals/main.tf @@ -14,7 +14,6 @@ module "appeals" { app_name = "appeals" app_url = "administrativeappeals" module_name = "appeals" - sql_migration_path = "../scripts/administrative_appeals" app_db_name = "ossc" app_db_login_name = "ossc-app" app_source_db_name = "Ossc" @@ -54,7 +53,6 @@ module "ahmlr" { app_name = "hmlands" app_url = "landregistrationdivision" module_name = "ahmlr" - sql_migration_path = "../scripts/ahmlr" app_db_name = "hmlands" app_db_login_name = "hmlands-app" app_source_db_name = "hmlands" @@ -94,7 +92,6 @@ module "care_standards" { app_name = "care-standards" module_name = "care_standards" app_url = "carestandards" - sql_migration_path = "../scripts/care_standards" app_db_name = "carestandards" app_db_login_name = "carestandards-app" app_source_db_name = "carestandards" @@ -134,7 +131,6 @@ module "cicap" { app_name = "cicap" app_url = "cicap" module_name = "cicap" - sql_migration_path = "../scripts/cicap" app_db_name = "cicap" app_db_login_name = "cicap-app" app_source_db_name = "cicap" @@ -174,7 +170,6 @@ module "employment_appeals" { app_name = "employment-appeals" app_url = "employmentappeals" module_name = "employment_appeals" - sql_migration_path = "../scripts/employment_appeals" app_db_name = "eat" app_db_login_name = "eat-app" app_source_db_name = "eat" @@ -215,7 +210,6 @@ module "finance_and_tax" { app_name = "finance-and-tax" app_url = "financeandtax" module_name = "finance_and_tax" - sql_migration_path = "../scripts/finance_and_tax" app_db_name = "ftt" app_db_login_name = "ftt-app" app_source_db_name = "ftt" @@ -256,7 +250,6 @@ module "immigration_services" { app_name = "immigration-services" app_url = "immigrationservices" module_name = "immigration_services" - sql_migration_path = "../scripts/immigration_services" app_db_name = "imset" app_db_login_name = "imset-app" app_source_db_name = "imset" @@ -297,7 +290,6 @@ module "information_tribunal" { app_name = "information-tribunal" app_url = "informationrights" module_name = "information_tribunal" - sql_migration_path = "../scripts/information_tribunal" app_db_name = "it" app_db_login_name = "it-app" app_source_db_name = "it" @@ -338,7 +330,6 @@ module "lands_tribunal" { app_name = "lands-chamber" app_url = "landschamber" module_name = "lands_tribunal" - sql_migration_path = "../scripts/lands_chamber" app_db_name = "lands" app_db_login_name = "lands-app" app_source_db_name = "lands" @@ -379,7 +370,6 @@ module "transport" { app_name = "transport" app_url = "transportappeals" module_name = "transport" - sql_migration_path = "../scripts/transport" app_db_name = "transport" app_db_login_name = "transport-app" app_source_db_name = "Transport" @@ -414,6 +404,46 @@ module "transport" { new_db_password = random_password.app_new_password.result } +module 
"asylum_support" { + is_ftp_app = false + source = "./modules/tribunal" + app_name = "asylum-support" + app_url = "asylumsupport" + module_name = "asylum_support" + app_db_name = "asadj" + app_db_login_name = "asadj-app" + app_source_db_name = "asadj" + app_rds_url = aws_db_instance.rdsdb.address + app_rds_user = local.rds_user + app_rds_port = local.rds_port + app_rds_password = local.rds_password + + app_source_db_url = local.source_db_url + app_source_db_user = local.source_db_user + app_source_db_password = local.source_db_password + environment = local.environment + application_data = local.application_data.accounts[local.environment] + tags = local.tags + dms_instance_arn = aws_dms_replication_instance.tribunals_replication_instance.replication_instance_arn + task_definition_volume = local.application_data.accounts[local.environment].task_definition_volume + appscaling_min_capacity = local.application_data.accounts[local.environment].appscaling_min_capacity + appscaling_max_capacity = local.application_data.accounts[local.environment].appscaling_max_capacity + ecs_scaling_cpu_threshold = local.application_data.accounts[local.environment].ecs_scaling_cpu_threshold + ecs_scaling_mem_threshold = local.application_data.accounts[local.environment].ecs_scaling_mem_threshold + app_count = local.application_data.accounts[local.environment].app_count + server_port = local.application_data.accounts[local.environment].server_port_1 + cluster_id = aws_ecs_cluster.tribunals_cluster.id + cluster_name = aws_ecs_cluster.tribunals_cluster.name + vpc_shared_id = data.aws_vpc.shared.id + subnets_shared_public_ids = data.aws_subnets.shared-public.ids + aws_acm_certificate_external = aws_acm_certificate.external + documents_location = "Judgments" + target_group_attachment_port = var.services["asylum_support"].port + target_group_arns = local.target_group_arns + target_group_arns_sftp = local.target_group_arns_sftp + new_db_password = random_password.app_new_password.result +} + module "charity_tribunal_decisions" { is_ftp_app = true source = "./modules/tribunal_ftp" diff --git a/terraform/environments/tribunals/modules/ecs_task/main.tf b/terraform/environments/tribunals/modules/ecs_task/main.tf index 591416e7893..31cf4a6f371 100644 --- a/terraform/environments/tribunals/modules/ecs_task/main.tf +++ b/terraform/environments/tribunals/modules/ecs_task/main.tf @@ -147,6 +147,15 @@ resource "aws_ecs_service" "ecs_service" { container_port = var.server_port } + deployment_circuit_breaker { + enable = true + rollback = true + } + + deployment_minimum_healthy_percent = 0 + deployment_maximum_percent = 100 + force_new_deployment = true + depends_on = [ aws_iam_role_policy_attachment.ecs_task_execution_role, aws_ecs_task_definition.ecs_task_definition, aws_cloudwatch_log_group.cloudwatch_group ] @@ -188,6 +197,15 @@ resource "aws_ecs_service" "ecs_service_sftp" { container_port = 22 } + deployment_circuit_breaker { + enable = true + rollback = true + } + + deployment_minimum_healthy_percent = 0 + deployment_maximum_percent = 100 + force_new_deployment = true + depends_on = [ aws_iam_role_policy_attachment.ecs_task_execution_role, aws_ecs_task_definition.ecs_task_definition, aws_cloudwatch_log_group.cloudwatch_group ] diff --git a/terraform/environments/tribunals/modules/tribunal/main.tf b/terraform/environments/tribunals/modules/tribunal/main.tf index 94c92e5057b..addf317b88d 100644 --- a/terraform/environments/tribunals/modules/tribunal/main.tf +++ b/terraform/environments/tribunals/modules/tribunal/main.tf @@ 
-3,7 +3,6 @@ locals { app = var.app_name app_url = var.app_url module_name = var.module_name - sql_migration_path = var.sql_migration_path app_db_name = var.app_db_name app_db_login_name = var.app_db_login_name app_source_db_name = var.app_source_db_name diff --git a/terraform/environments/tribunals/modules/tribunal/variables.tf b/terraform/environments/tribunals/modules/tribunal/variables.tf index 83155f1c90e..a59c8ce45d2 100644 --- a/terraform/environments/tribunals/modules/tribunal/variables.tf +++ b/terraform/environments/tribunals/modules/tribunal/variables.tf @@ -9,9 +9,6 @@ variable "module_name" { variable "app_url" { } -variable "sql_migration_path" { -} - variable "app_db_name" { } diff --git a/terraform/environments/tribunals/waf.tf b/terraform/environments/tribunals/waf.tf index fd723b99a41..d027cb5eb95 100644 --- a/terraform/environments/tribunals/waf.tf +++ b/terraform/environments/tribunals/waf.tf @@ -1,6 +1,7 @@ resource "aws_wafv2_ip_set" "allowed_ip_set" { - name = "allowed-ip-set" - scope = "REGIONAL" + provider = aws.us-east-1 + name = "allowed-ip-set" + scope = "CLOUDFRONT" addresses = [ "20.26.11.71/32", "20.26.11.108/32", "20.49.214.199/32", "20.49.214.228/32", "51.149.249.0/29", "51.149.249.32/29", @@ -12,8 +13,9 @@ resource "aws_wafv2_ip_set" "allowed_ip_set" { } resource "aws_wafv2_web_acl" "tribunals_web_acl" { - name = "tribunals-web-acl" - scope = "REGIONAL" + provider = aws.us-east-1 + name = "tribunals-web-acl" + scope = "CLOUDFRONT" default_action { allow {} @@ -21,7 +23,7 @@ resource "aws_wafv2_web_acl" "tribunals_web_acl" { rule { name = "common-rule-set" - priority = 1 + priority = 2 override_action { none {} @@ -61,7 +63,7 @@ resource "aws_wafv2_web_acl" "tribunals_web_acl" { rule { name = "AllowSpecificIPsForAdminAndSecurePaths" - priority = 2 + priority = 3 action { allow {} @@ -122,7 +124,7 @@ resource "aws_wafv2_web_acl" "tribunals_web_acl" { rule { name = "BlockNonAllowedIPsForAdminAndSecurePaths" - priority = 3 + priority = 4 action { block { @@ -168,14 +170,15 @@ resource "aws_wafv2_web_acl" "tribunals_web_acl" { } resource "aws_wafv2_regex_pattern_set" "blocked_paths" { - name = "blocked-paths" - scope = "REGIONAL" + provider = aws.us-east-1 + name = "blocked-paths" + scope = "CLOUDFRONT" regular_expression { - regex_string = "^/admin(/.*)?$" + regex_string = "(?i)^/admin(/.*)?$" } regular_expression { - regex_string = "^/secure(/.*)?$" + regex_string = "(?i)^/secure(/.*)?$" } } \ No newline at end of file diff --git a/terraform/environments/xhibit-portal/bastion_linux.json b/terraform/environments/xhibit-portal/bastion_linux.json index 0dcc8c42fe9..ef27472af5b 100644 --- a/terraform/environments/xhibit-portal/bastion_linux.json +++ b/terraform/environments/xhibit-portal/bastion_linux.json @@ -15,6 +15,7 @@ "solomona": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8Z5s6NukuuhdrioJNfRSypI5+fQ3t9wPk6bgYtrJtnvlS+nDlmUAFwbky07al/UvHs8p11sAL97CqJ8Ky0k9WDRrlQm3Boj/lFrdDue+i7aZ7f//WRaMjfIiji1RP5pfrbKptzM+QLYFqoBjbv710599fbK+N2Eu2winqVxDmZk9QchlVyAE9Lc4pVVeU4IpTkVBZ1BhqCz0Y30rtAkqAPXdBskDC7NjM+bWcmBeqwQFkykEd/TThct1ZmzKk+xtEEaP8zkVVzcKoWpsovXOoCm3jTN1ZB4hNtMBiC4KWF3jNoliDmWd2s8rVsk+uuHrMa1nX4kJk8QLKu4YDYKhExCUzjBBJPjAax+5je+K8HSQ9T8vb9cE5Nu+MA9k6chxFo3MoxZte9B7bmNWeTNd99Nc4Iy4CkUDv6IMNinKka0h92Wzxp2gKbp2eP/DAxsPf7+OJ/k6a0fcJ/yjsClPuQozf4RdwZmeXgqN5FKTDP/UV2jTDM6QdTMMmEsvgi8k=", "shanef": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQCykKL31U8+XMxaIZV/FpkHmJCUBXIialBrHcFAKTR/MK1nv4bs43hmJFgeO/t4eWyaRrS1PGu3NaxfnYdEAKTWDEUKG29vi4IdAeW08ECDOxZ0lszG6qel3P2hVS3njkgimwJmM/4vWYSoVf0NwRrOJVuvriZQ9oao0CCVvI7sIjE15yE4edFOzvoGfAM3sQJMI+XYwFTraNMGLxZhAdJ5U2o4huY2ZzJY+2pb6JkOX3eWJoL/MaotcwcVTTrtG9y5PHbVNjMJQJCSO71yy5TzElm4EWYS/Y4ZBSndE0uxA/dMfCtZnhereNXDvQtZcAXj4VxDtbpAKy1asw2pIa8BcTCOegaI7h1GNjBPXa7GPLfFwFmtGnVNBpVrIRaRkeI5Bd6JbhgkKUln9CgQLvoAEGwChPy+gjecgSFqE8Bplp7/CF35KkmPwlEGdBmQqHd5CBnyO3a/Paah/j4qGpHO9jFd5OzqF8B5lt437IJSSY/dT1mLszYm1RWXR1tKZjUtd+OyqoTPSG+lVQGJCid6HyPK+sYQr055Zc0DcsAHc79RR4rBR+Oe9yTm5WwvD8s52yWVBII/xqxaG8INW9PBfrkl0pGQ3aViP0Cj+9dmk6buKdtJX3P8Ca4eiRAp0KgiO3BIy8c8zf0s6+ZF8yTWtbIroWc7EOVixs4cjEzWBw== shane.forde@vesion1.com", "zoltanp": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDKtvsQ949K34yIp50dVyc/jAK/WlX1uuwYKPKBWybioBcuC5L+YcMc/uuZRMUFQlPxMdsWKzzQ8vaIzo05n+3Wx/TvzW/FAR8T0hI6IkkPUcv0p0lbjbFyJO9SdN2Goem8ujRkFBykON9zEfLHzFMaEmk/d8QkkIEr6sglnSr56igBWVGKkeZq74R+RyRmV0WThxq4BptfP7v2NDl8XjqdNZ5jYOT/FayqlS1Qxmsd9VjPj3BtTpf2KeOP9grsPGDTM9O4+EVSOtr/cXvJ9ELfbUj6JGtBZVG3jv+6QMfRHn66gyknzkAsyv8s6b2JY7rUuLRyxA8z+DuRiRL+K4HLvcvYdMoYaYjSU1gyPPZEoUiwSP62fBoCZr2ZibCPjkKbpBKFUgCTNsC598p9Pb5ucnnAGiKiyTyyH6xRuxpSW1Kzhby1PJ8LYJbH/Zl+083XG8HqSQONcM+eho5k6FO+8zLOWGhpByVwpsIlHHs9189YGhvbLhs+KDtYiRl2Rbryz7Oxx8/TFN6Ml1tBKcTFSIXcNhzy5yCf9nFeIS5FH/Qf/6ARJEkNGA+KEGrxlE6zvJrCnMGNoTWpV/fhZcSMhh8KEmWge2VOLl5UQ69QFgQHboj4pR+TFAwndMA7E4fBKiVgfvfhsd1KxUMvNLRaLGiYTOfOP5brNNNuiZmmmQ==", + "georged": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC5y0fg79fngQSwXNYfOhdFmfZYflx6lkm0AYRAJYKbc3F4xbbtdS/UoEF7E1tJ6GpprKLDfJT59Ax7jZVuxo7byxUHexlF6kdgG1MWy4rfTECMgciabrlqDFDa++xJ9EuR7sgh73Mm9wrLsHWoUgw6zDyb6ois8NtYp9TyZChh2C23bR2J2Ljo1AsGEEeeprLtJV+Vhm7xa2kdg29RAIKxoDJzkeaU21YDhNvVbu+0RNoCA4UFvSGheRidJB2vHK+xIg3UivgQaK7cTnE8ds+Q4y+Qa2GeVIPPpnfZXbLdk+B3bFiOdhizTT6mP/q35RXbq3emhj4qEnit5xKI2Wr5DpaooyVroUmjg22DCy89gW0LlfJ3Z5jgqL+LGkT8nt6NfDJ/e9ySJ6IpUm95hznuxcCszaDn4VpNXpUlDvzkjKNPGNursjGQ96gWm5gzVD9DHgxtVKSXgzxjuTNA/Y+rvEXdlYDZpw7Ap89HDU5siQY5mJaUsZGIm94BhOzvmUWpP/NM7Q4CkH16NCED4b/nWV+am/OToXrkD05Rf5jUOWqZE30QPL24M9p5SrMhSBM0ET7mtBVo4GUkXjoc5glYSSgHVBa/QvJ/oqiwbixpsJJbqleqzoFmhcOVo9HlZTdnmgI+OUmM3StrUyiDxg3Zq5Lw5UEAUS5ux9xaI7/tPw== george.dinu@MJ006819", "luigid": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDED0d5x/+qAvao8fXW0fJVaF4b2WnOCisvNV8W5ua3brwh8WjOwKvLlZIVip9es9Q3KVk/bo67rRBoFYlODdoZHvH39ZcUHCZs9h7vv5PoDySXcp6UBMMUzgkO3aU2Y0aw2mwG1fTXUwuaINbkXGd/xYbD/lRYxCBbXWbKp4DwzfmQWE5nmny1RAMLDk8ZU7d2WFA2U3Kt2UAdRC6wi/luK3pQr4vOuAYaTS9PxkLArbPNsEPAcsaV8SEec0zLLBQLyIeCykcMB+8LaWmQPYdpJj50M6g5i4p7S/97ImyJE1MMXdtA0I+7stg8BaR7KqIbYE6h19OBx2aefnSDtLnTjveoQp7CvikViL4Yi8wYbZdTsNOQ6DfHfDQsO4iopi/w8z4rJqUzfOas0OaQO+oHZ69PtysJKo4pBGBY5ixpU8DrMtHZCAvm4sVLu6RqWmEU2FcoGB+fmTZvsy1fsxAKrz3OC6/AMrKQEpIxqkyU9mltfo5MgZdRWsHom9YahE8= luigi.difraia@MJ004442" }, "preproduction": { @@ -26,6 +27,7 @@ "matteo": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCjBpFFRlhouHQKfiJcVmnymt4NGfUv32r0vlb2YHkj5NZMr5WP94oPO4TRAYkuoItpsVY/7KYzeB8j6YfDyzlQOaV+leopq1oal//RUIBiRcDlHU3RVpyA/+aZCeFUUFxMT0eaRirL/uqdpRhWEmga76KglUGk1toiebZHZeMGRfP96/W5rGAczMdVSXHr2fC2rvf+KRYAc6R898IMPLZQJznNoz0JLhKPB7I3NLhME5c+kPUGmuWO9G2Fvd/zbrPQoE75ZP7isDvfpqkfFndZrCYKBIjZup7Du/KIx6ISvtsRTChhU1fLGbDMIjdfAqdp5Ziaxf2sN2H2jTx68Rgg9bX6z2iZRoi5bX9Adz6SjGgEXLD+xo4nAkQqiTrC7Pj3HUKVXbqtf68B7+Yi9yb8clkpraiVruAPEsrqb9hczpMvPzdMbtBukdqZ6NSs6rPMWEJ6cdNjEEYpmc8b3lHq1Pg66ZrNOVlAUwB2mPPm7vyJ9pN3sOFLnDvRU49QXY0= matteo.pecorelli@L1464", "garthj": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCr5r6h19aPB7FUSreTO2557YBXm+LTE1i0GF7+3ktSRxuiaUvwR4XLXtoFNjMXVG5gsEgWsHph2GsKjKgOgCdbxc+06d0apSYkfu/qau8dFvFkqwpcOxR6eBsPvQoGSKsLP0oKq/P2gmy9szbV53/wD3Zt3XinQxyETlxVgFH+pcat6T5j8HUj34O3D3wYR7vBMrZPgWZJxm4iufnL56Ze/JBqZv+772Q29pV8VhGHBJuvXppUgAP1Ag/MdUtYdGvWl9M94AeVygJ64GE446+Ptc3qBMetSCNbHGLofuHokLJqBW5W1S9se7lVWVMvU2cWdos95AOlGNDXlTKGgdjxURPdItpwQjL5eh3MtzQYCeT0mVnLL6fO6pmNgHc1V5yr7gqTTbg8F8gdEliiQrZypq4KizNDEYM/zWQUXGh5Aguon3EzRKVViOzLf4sn0H5NnchO8IsvfSNvd22QFWHb6LShBr9hCmcqUqxAJWA7/P1KGDm0PyMXcw4SIn6bkXk= garthjackson@FVFGP0AAQ05P", "zoltanp": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDKtvsQ949K34yIp50dVyc/jAK/WlX1uuwYKPKBWybioBcuC5L+YcMc/uuZRMUFQlPxMdsWKzzQ8vaIzo05n+3Wx/TvzW/FAR8T0hI6IkkPUcv0p0lbjbFyJO9SdN2Goem8ujRkFBykON9zEfLHzFMaEmk/d8QkkIEr6sglnSr56igBWVGKkeZq74R+RyRmV0WThxq4BptfP7v2NDl8XjqdNZ5jYOT/FayqlS1Qxmsd9VjPj3BtTpf2KeOP9grsPGDTM9O4+EVSOtr/cXvJ9ELfbUj6JGtBZVG3jv+6QMfRHn66gyknzkAsyv8s6b2JY7rUuLRyxA8z+DuRiRL+K4HLvcvYdMoYaYjSU1gyPPZEoUiwSP62fBoCZr2ZibCPjkKbpBKFUgCTNsC598p9Pb5ucnnAGiKiyTyyH6xRuxpSW1Kzhby1PJ8LYJbH/Zl+083XG8HqSQONcM+eho5k6FO+8zLOWGhpByVwpsIlHHs9189YGhvbLhs+KDtYiRl2Rbryz7Oxx8/TFN6Ml1tBKcTFSIXcNhzy5yCf9nFeIS5FH/Qf/6ARJEkNGA+KEGrxlE6zvJrCnMGNoTWpV/fhZcSMhh8KEmWge2VOLl5UQ69QFgQHboj4pR+TFAwndMA7E4fBKiVgfvfhsd1KxUMvNLRaLGiYTOfOP5brNNNuiZmmmQ==", + "georged": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC5y0fg79fngQSwXNYfOhdFmfZYflx6lkm0AYRAJYKbc3F4xbbtdS/UoEF7E1tJ6GpprKLDfJT59Ax7jZVuxo7byxUHexlF6kdgG1MWy4rfTECMgciabrlqDFDa++xJ9EuR7sgh73Mm9wrLsHWoUgw6zDyb6ois8NtYp9TyZChh2C23bR2J2Ljo1AsGEEeeprLtJV+Vhm7xa2kdg29RAIKxoDJzkeaU21YDhNvVbu+0RNoCA4UFvSGheRidJB2vHK+xIg3UivgQaK7cTnE8ds+Q4y+Qa2GeVIPPpnfZXbLdk+B3bFiOdhizTT6mP/q35RXbq3emhj4qEnit5xKI2Wr5DpaooyVroUmjg22DCy89gW0LlfJ3Z5jgqL+LGkT8nt6NfDJ/e9ySJ6IpUm95hznuxcCszaDn4VpNXpUlDvzkjKNPGNursjGQ96gWm5gzVD9DHgxtVKSXgzxjuTNA/Y+rvEXdlYDZpw7Ap89HDU5siQY5mJaUsZGIm94BhOzvmUWpP/NM7Q4CkH16NCED4b/nWV+am/OToXrkD05Rf5jUOWqZE30QPL24M9p5SrMhSBM0ET7mtBVo4GUkXjoc5glYSSgHVBa/QvJ/oqiwbixpsJJbqleqzoFmhcOVo9HlZTdnmgI+OUmM3StrUyiDxg3Zq5Lw5UEAUS5ux9xaI7/tPw== george.dinu@MJ006819", "luigid": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDED0d5x/+qAvao8fXW0fJVaF4b2WnOCisvNV8W5ua3brwh8WjOwKvLlZIVip9es9Q3KVk/bo67rRBoFYlODdoZHvH39ZcUHCZs9h7vv5PoDySXcp6UBMMUzgkO3aU2Y0aw2mwG1fTXUwuaINbkXGd/xYbD/lRYxCBbXWbKp4DwzfmQWE5nmny1RAMLDk8ZU7d2WFA2U3Kt2UAdRC6wi/luK3pQr4vOuAYaTS9PxkLArbPNsEPAcsaV8SEec0zLLBQLyIeCykcMB+8LaWmQPYdpJj50M6g5i4p7S/97ImyJE1MMXdtA0I+7stg8BaR7KqIbYE6h19OBx2aefnSDtLnTjveoQp7CvikViL4Yi8wYbZdTsNOQ6DfHfDQsO4iopi/w8z4rJqUzfOas0OaQO+oHZ69PtysJKo4pBGBY5ixpU8DrMtHZCAvm4sVLu6RqWmEU2FcoGB+fmTZvsy1fsxAKrz3OC6/AMrKQEpIxqkyU9mltfo5MgZdRWsHom9YahE8= luigi.difraia@MJ004442" }, "production": { @@ -37,6 +39,7 @@ "matteo": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCjBpFFRlhouHQKfiJcVmnymt4NGfUv32r0vlb2YHkj5NZMr5WP94oPO4TRAYkuoItpsVY/7KYzeB8j6YfDyzlQOaV+leopq1oal//RUIBiRcDlHU3RVpyA/+aZCeFUUFxMT0eaRirL/uqdpRhWEmga76KglUGk1toiebZHZeMGRfP96/W5rGAczMdVSXHr2fC2rvf+KRYAc6R898IMPLZQJznNoz0JLhKPB7I3NLhME5c+kPUGmuWO9G2Fvd/zbrPQoE75ZP7isDvfpqkfFndZrCYKBIjZup7Du/KIx6ISvtsRTChhU1fLGbDMIjdfAqdp5Ziaxf2sN2H2jTx68Rgg9bX6z2iZRoi5bX9Adz6SjGgEXLD+xo4nAkQqiTrC7Pj3HUKVXbqtf68B7+Yi9yb8clkpraiVruAPEsrqb9hczpMvPzdMbtBukdqZ6NSs6rPMWEJ6cdNjEEYpmc8b3lHq1Pg66ZrNOVlAUwB2mPPm7vyJ9pN3sOFLnDvRU49QXY0= matteo.pecorelli@L1464", "garthj": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCr5r6h19aPB7FUSreTO2557YBXm+LTE1i0GF7+3ktSRxuiaUvwR4XLXtoFNjMXVG5gsEgWsHph2GsKjKgOgCdbxc+06d0apSYkfu/qau8dFvFkqwpcOxR6eBsPvQoGSKsLP0oKq/P2gmy9szbV53/wD3Zt3XinQxyETlxVgFH+pcat6T5j8HUj34O3D3wYR7vBMrZPgWZJxm4iufnL56Ze/JBqZv+772Q29pV8VhGHBJuvXppUgAP1Ag/MdUtYdGvWl9M94AeVygJ64GE446+Ptc3qBMetSCNbHGLofuHokLJqBW5W1S9se7lVWVMvU2cWdos95AOlGNDXlTKGgdjxURPdItpwQjL5eh3MtzQYCeT0mVnLL6fO6pmNgHc1V5yr7gqTTbg8F8gdEliiQrZypq4KizNDEYM/zWQUXGh5Aguon3EzRKVViOzLf4sn0H5NnchO8IsvfSNvd22QFWHb6LShBr9hCmcqUqxAJWA7/P1KGDm0PyMXcw4SIn6bkXk= garthjackson@FVFGP0AAQ05P", "zoltanp": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDKtvsQ949K34yIp50dVyc/jAK/WlX1uuwYKPKBWybioBcuC5L+YcMc/uuZRMUFQlPxMdsWKzzQ8vaIzo05n+3Wx/TvzW/FAR8T0hI6IkkPUcv0p0lbjbFyJO9SdN2Goem8ujRkFBykON9zEfLHzFMaEmk/d8QkkIEr6sglnSr56igBWVGKkeZq74R+RyRmV0WThxq4BptfP7v2NDl8XjqdNZ5jYOT/FayqlS1Qxmsd9VjPj3BtTpf2KeOP9grsPGDTM9O4+EVSOtr/cXvJ9ELfbUj6JGtBZVG3jv+6QMfRHn66gyknzkAsyv8s6b2JY7rUuLRyxA8z+DuRiRL+K4HLvcvYdMoYaYjSU1gyPPZEoUiwSP62fBoCZr2ZibCPjkKbpBKFUgCTNsC598p9Pb5ucnnAGiKiyTyyH6xRuxpSW1Kzhby1PJ8LYJbH/Zl+083XG8HqSQONcM+eho5k6FO+8zLOWGhpByVwpsIlHHs9189YGhvbLhs+KDtYiRl2Rbryz7Oxx8/TFN6Ml1tBKcTFSIXcNhzy5yCf9nFeIS5FH/Qf/6ARJEkNGA+KEGrxlE6zvJrCnMGNoTWpV/fhZcSMhh8KEmWge2VOLl5UQ69QFgQHboj4pR+TFAwndMA7E4fBKiVgfvfhsd1KxUMvNLRaLGiYTOfOP5brNNNuiZmmmQ==", + "georged": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC5y0fg79fngQSwXNYfOhdFmfZYflx6lkm0AYRAJYKbc3F4xbbtdS/UoEF7E1tJ6GpprKLDfJT59Ax7jZVuxo7byxUHexlF6kdgG1MWy4rfTECMgciabrlqDFDa++xJ9EuR7sgh73Mm9wrLsHWoUgw6zDyb6ois8NtYp9TyZChh2C23bR2J2Ljo1AsGEEeeprLtJV+Vhm7xa2kdg29RAIKxoDJzkeaU21YDhNvVbu+0RNoCA4UFvSGheRidJB2vHK+xIg3UivgQaK7cTnE8ds+Q4y+Qa2GeVIPPpnfZXbLdk+B3bFiOdhizTT6mP/q35RXbq3emhj4qEnit5xKI2Wr5DpaooyVroUmjg22DCy89gW0LlfJ3Z5jgqL+LGkT8nt6NfDJ/e9ySJ6IpUm95hznuxcCszaDn4VpNXpUlDvzkjKNPGNursjGQ96gWm5gzVD9DHgxtVKSXgzxjuTNA/Y+rvEXdlYDZpw7Ap89HDU5siQY5mJaUsZGIm94BhOzvmUWpP/NM7Q4CkH16NCED4b/nWV+am/OToXrkD05Rf5jUOWqZE30QPL24M9p5SrMhSBM0ET7mtBVo4GUkXjoc5glYSSgHVBa/QvJ/oqiwbixpsJJbqleqzoFmhcOVo9HlZTdnmgI+OUmM3StrUyiDxg3Zq5Lw5UEAUS5ux9xaI7/tPw== george.dinu@MJ006819", "luigid": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDED0d5x/+qAvao8fXW0fJVaF4b2WnOCisvNV8W5ua3brwh8WjOwKvLlZIVip9es9Q3KVk/bo67rRBoFYlODdoZHvH39ZcUHCZs9h7vv5PoDySXcp6UBMMUzgkO3aU2Y0aw2mwG1fTXUwuaINbkXGd/xYbD/lRYxCBbXWbKp4DwzfmQWE5nmny1RAMLDk8ZU7d2WFA2U3Kt2UAdRC6wi/luK3pQr4vOuAYaTS9PxkLArbPNsEPAcsaV8SEec0zLLBQLyIeCykcMB+8LaWmQPYdpJj50M6g5i4p7S/97ImyJE1MMXdtA0I+7stg8BaR7KqIbYE6h19OBx2aefnSDtLnTjveoQp7CvikViL4Yi8wYbZdTsNOQ6DfHfDQsO4iopi/w8z4rJqUzfOas0OaQO+oHZ69PtysJKo4pBGBY5ixpU8DrMtHZCAvm4sVLu6RqWmEU2FcoGB+fmTZvsy1fsxAKrz3OC6/AMrKQEpIxqkyU9mltfo5MgZdRWsHom9YahE8= luigi.difraia@MJ004442" }, "default": {} diff --git a/terraform/environments/xhibit-portal/lambda/delete_old_ami.zip b/terraform/environments/xhibit-portal/lambda/delete_old_ami.zip deleted file mode 100644 index cf75e6c2d27..00000000000 Binary files a/terraform/environments/xhibit-portal/lambda/delete_old_ami.zip and /dev/null differ diff --git a/terraform/environments/xhibit-portal/lambda/lambda_function.zip b/terraform/environments/xhibit-portal/lambda/lambda_function.zip deleted file mode 100644 index 8a7ef687bd6..00000000000 Binary files a/terraform/environments/xhibit-portal/lambda/lambda_function.zip and /dev/null differ diff --git a/terraform/modules/baseline/cloudwatch.tf b/terraform/modules/baseline/cloudwatch.tf index a63f7857f70..229af7385da 100644 --- a/terraform/modules/baseline/cloudwatch.tf +++ b/terraform/modules/baseline/cloudwatch.tf @@ -35,7 +35,13 @@ module "cloudwatch_dashboard" { 
ec2_instances = module.ec2_instance periodOverride = lookup(each.value, "periodOverride", null) start = lookup(each.value, "start", null) - widget_groups = lookup(each.value, "widget_groups", []) + widget_groups = [ + for widget_group in lookup(each.value, "widget_groups", []) : merge(widget_group, + lookup(widget_group, "account_name", null) == null ? {} : { + accountId = var.environment.account_ids[lookup(widget_group, "account_name", null)] + } + ) + ] } resource "aws_cloudwatch_log_group" "this" { diff --git a/terraform/modules/baseline/outputs.tf b/terraform/modules/baseline/outputs.tf index 6c2d43ae440..2255c5325bf 100644 --- a/terraform/modules/baseline/outputs.tf +++ b/terraform/modules/baseline/outputs.tf @@ -146,11 +146,26 @@ output "security_groups" { value = aws_security_group.this } +output "schedule_alarms_lambda" { + description = "schedule alarms lambda output" + value = module.schedule_alarms_lambda +} + output "sns_topics" { description = "map of aws_sns_topic resources corresponding to var.sns_topics" value = aws_sns_topic.this } +output "ssm_associations" { + description = "map of aws_ssm_association resources corresponding to var.ssm_association" + value = aws_ssm_association.this +} + +output "ssm_documents" { + description = "map of aws_ssm_document resources corresponding to var.ssm_documents" + value = aws_ssm_document.this +} + output "ssm_parameters" { description = "map of security groups corresponding to var.ssm_parameters" value = merge( diff --git a/terraform/modules/baseline/schedule_alarms_lambda.tf b/terraform/modules/baseline/schedule_alarms_lambda.tf index 7a195694a5b..e0e788d1956 100644 --- a/terraform/modules/baseline/schedule_alarms_lambda.tf +++ b/terraform/modules/baseline/schedule_alarms_lambda.tf @@ -6,7 +6,7 @@ module "schedule_alarms_lambda" { length(var.schedule_alarms_lambda.alarm_patterns) > 0 ) ? 
1 : 0 - lambda_function_name = var.schedule_alarms_lambda.function_name + lambda_function_name = "schedule-alarms" lambda_log_level = var.schedule_alarms_lambda.lambda_log_level alarm_list = var.schedule_alarms_lambda.alarm_list diff --git a/terraform/modules/baseline/ssm.tf b/terraform/modules/baseline/ssm.tf index 588f02e66a2..ffecf9b6634 100644 --- a/terraform/modules/baseline/ssm.tf +++ b/terraform/modules/baseline/ssm.tf @@ -59,7 +59,7 @@ resource "aws_ssm_association" "this" { apply_only_at_cron_interval = each.value.apply_only_at_cron_interval association_name = each.key - name = each.value.name + name = try(aws_ssm_document.this[each.value.name].name, each.value.name) # so ssm_doc is created first max_concurrency = each.value.max_concurrency max_errors = each.value.max_errors schedule_expression = each.value.schedule_expression diff --git a/terraform/modules/baseline/variables.tf b/terraform/modules/baseline/variables.tf index e50df55f260..b923bbe7c3f 100644 --- a/terraform/modules/baseline/variables.tf +++ b/terraform/modules/baseline/variables.tf @@ -97,6 +97,7 @@ variable "cloudwatch_dashboards" { # header_markdown = optional(string) # include a header text widget if set # width = number # width of each widget, must be divisor of 24 # height = number # height of each widget + # account_name = optional(string) # for monitoring account, limit to given account # widgets = list(any) # no need to set x,y,width,height # })), []) #})) @@ -492,6 +493,7 @@ variable "fsx_windows" { deployment_type = optional(string) # [SINGLE_AZ_1 (default), SINGLE_AZ_2, MULTI_AZ_1] kms_key_id = optional(string, "general") preferred_subnet_name = optional(string, "private") # set if MULTI_AZ_1 + preferred_subnet_id = optional(string) # set if MULTI_AZ_1 preferred_availability_zone = optional(string) # set if MULTI_AZ_1 security_group_ids = optional(list(string)) skip_final_backup = optional(bool) @@ -918,13 +920,12 @@ variable "s3_buckets" { variable "schedule_alarms_lambda" { description = "" type = object({ - function_name = optional(string, null) lambda_log_level = optional(string, "INFO") alarm_list = optional(list(string), []) alarm_patterns = optional(list(string), []) disable_weekend = optional(bool, true) - start_time = optional(string, "06:15") - end_time = optional(string, "20:45") + start_time = optional(string, "20:45") # when to disable alarm + end_time = optional(string, "06:15") # when to re-enable alarm tags = optional(map(string), {}) }) default = {} diff --git a/terraform/modules/baseline_presets/cloudwatch_dashboards.tf b/terraform/modules/baseline_presets/cloudwatch_dashboards.tf index 6bd8fbdeea2..9995e92a7b2 100644 --- a/terraform/modules/baseline_presets/cloudwatch_dashboards.tf +++ b/terraform/modules/baseline_presets/cloudwatch_dashboards.tf @@ -460,6 +460,23 @@ locals { } } } + endpoint-response-time-ms = { + type = "metric" + expression = "SORT(SEARCH('{CWAgent,InstanceId,type,type_instance} MetricName=\"collectd_endpoint_response_time_ms_value\"','Maximum'),MAX,DESC)" + properties = { + view = "timeSeries" + stacked = false + region = "eu-west-2" + title = "endpoint-response-time-ms" + stat = "Maximum" + yAxis = { + left = { + showUnits = false, + label = "ms" + } + } + } + } endpoint-cert-days-to-expiry = { type = "metric" alarm_threshold = local.cloudwatch_metric_alarms.ec2_instance_cwagent_collectd_endpoint_monitoring.endpoint-cert-expires-soon.threshold @@ -775,6 +792,57 @@ locals { } } } + load-balancer-port-allocation-error-count = { + type = "metric" + expression = 
"SORT(SEARCH('{AWS/NetworkELB,LoadBalancer,LoadBalancer} MetricName=\"PortAllocationErrorCount\"','Sum'),SUM,DESC)" + properties = { + view = "timeSeries" + stacked = true + region = "eu-west-2" + title = "NLB port-allocation-error-count" + stat = "Sum" + yAxis = { + left = { + showUnits = false, + label = "error count" + } + } + } + } + load-balancer-rejected-flow-count = { + type = "metric" + expression = "SORT(SEARCH('{AWS/NetworkELB,LoadBalancer,LoadBalancer} MetricName=\"RejectedFlowCount\"','Sum'),SUM,DESC)" + properties = { + view = "timeSeries" + stacked = true + region = "eu-west-2" + title = "NLB rejected-flow-count" + stat = "Sum" + yAxis = { + left = { + showUnits = false, + label = "flow count" + } + } + } + } + load-balancer-tcp-client-reset-count = { + type = "metric" + expression = "SORT(SEARCH('{AWS/NetworkELB,LoadBalancer,LoadBalancer} MetricName=\"TCP_Client_Reset_Count\"','Sum'),SUM,DESC)" + properties = { + view = "timeSeries" + stacked = true + region = "eu-west-2" + title = "NLB tcp-client-reset-count" + stat = "Sum" + yAxis = { + left = { + showUnits = false, + label = "reset count" + } + } + } + } } ssm = { ssm-command-success-count = { @@ -991,6 +1059,7 @@ locals { height = 8 widgets = [ local.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_endpoint_monitoring.endpoint-status, + local.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_endpoint_monitoring.endpoint-response-time-ms, local.cloudwatch_dashboard_widgets.ec2_instance_cwagent_collectd_endpoint_monitoring.endpoint-cert-days-to-expiry, ] } @@ -1025,6 +1094,9 @@ locals { local.cloudwatch_dashboard_widgets.network_lb.load-balancer-processed-bytes, local.cloudwatch_dashboard_widgets.network_lb.load-balancer-processed-packets, local.cloudwatch_dashboard_widgets.network_lb.load-balancer-peak-packets-per-second, + local.cloudwatch_dashboard_widgets.network_lb.load-balancer-port-allocation-error-count, + local.cloudwatch_dashboard_widgets.network_lb.load-balancer-rejected-flow-count, + local.cloudwatch_dashboard_widgets.network_lb.load-balancer-tcp-client-reset-count, ] } ssm_command = { diff --git a/terraform/modules/baseline_presets/cloudwatch_metric_alarms.tf b/terraform/modules/baseline_presets/cloudwatch_metric_alarms.tf index 98cc4948e7c..8a29488c93b 100644 --- a/terraform/modules/baseline_presets/cloudwatch_metric_alarms.tf +++ b/terraform/modules/baseline_presets/cloudwatch_metric_alarms.tf @@ -339,7 +339,7 @@ locals { period = "60" statistic = "Maximum" threshold = "1" - alarm_description = "Triggers if curl returns error for given endpoint from this EC2" + alarm_description = "Triggers if curl returns error for given endpoint from this EC2. See https://dsdmoj.atlassian.net/wiki/spaces/DSTT/pages/5295505478" alarm_actions = var.options.cloudwatch_metric_alarms_default_actions ok_actions = var.options.cloudwatch_metric_alarms_default_actions } @@ -349,10 +349,10 @@ locals { datapoints_to_alarm = "1" metric_name = "collectd_endpoint_cert_expiry_value" namespace = "CWAgent" - period = "86400" + period = "7200" statistic = "Minimum" threshold = "14" - alarm_description = "Triggers if collectd-endpoint-monitoring detects an endpoint with a certificate due to expire shortly. See https://dsdmoj.atlassian.net/wiki/spaces/DSTT/pages/4615340266" + alarm_description = "Triggers if collectd-endpoint-monitoring detects an endpoint with a certificate due to expire shortly. 
See https://dsdmoj.atlassian.net/wiki/spaces/DSTT/pages/5303664662" alarm_actions = var.options.cloudwatch_metric_alarms_default_actions ok_actions = var.options.cloudwatch_metric_alarms_default_actions } @@ -449,4 +449,25 @@ locals { } } } + + cloudwatch_metric_alarms_by_sns_topic = { + for sns_key, sns_value in local.sns_topics : sns_key => { + for namespace_key, namespace_value in local.cloudwatch_metric_alarms : namespace_key => { + for alarm_key, alarm_value in namespace_value : alarm_key => merge(alarm_value, { + alarm_actions = [sns_key] + ok_actions = [sns_key] + }) + } + } + } + + # alarms added via baseline. Put SSM command alerts in dso-pipelines so it doesn't clutter main application alerts + cloudwatch_metric_alarms_baseline = merge( + var.options.enable_ssm_command_monitoring ? { + "failed-ssm-command-${var.environment.account_name}" = local.cloudwatch_metric_alarms_by_sns_topic["dso-pipelines-pagerduty"].ssm.failed-ssm-command + } : {}, + var.options.enable_ssm_missing_metric_monitoring ? { + "ssm-command-metrics-missing-${var.environment.account_name}" = local.cloudwatch_metric_alarms_by_sns_topic["dso-pipelines-pagerduty"].ssm.ssm-command-metrics-missing + } : {}, + ) } diff --git a/terraform/modules/baseline_presets/iam_roles.tf b/terraform/modules/baseline_presets/iam_roles.tf index 03473411e07..dde1d86a9ef 100644 --- a/terraform/modules/baseline_presets/iam_roles.tf +++ b/terraform/modules/baseline_presets/iam_roles.tf @@ -5,7 +5,6 @@ locals { var.options.enable_ec2_delius_dba_secrets_access ? ["EC2OracleEnterpriseManagementSecretsRole"] : [], var.options.enable_image_builder ? ["EC2ImageBuilderDistributionCrossAccountRole"] : [], var.options.enable_ec2_oracle_enterprise_managed_server ? ["EC2OracleEnterpriseManagementSecretsRole"] : [], - var.options.enable_observability_platform_monitoring ? ["observability-platform"] : [], try(length(var.options.cloudwatch_metric_oam_links), 0) != 0 ? ["CloudWatch-CrossAccountSharingRole"] : [], var.options.enable_vmimport ? 
["vmimport"] : [], ])) @@ -92,21 +91,6 @@ locals { ] } - # allow Observability Plaform read-only access to Cloudwatch metrics - observability-platform = { - assume_role_policy = [{ - effect = "Allow" - actions = ["sts:AssumeRole"] - principals = { - type = "AWS" - identifiers = ["observability-platform-development"] - } - }] - policy_attachments = [ - "arn:aws:iam::aws:policy/CloudWatchReadOnlyAccess", - ] - } - vmimport = { assume_role_policy = [{ effect = "Allow" diff --git a/terraform/modules/baseline_presets/outputs.tf b/terraform/modules/baseline_presets/outputs.tf index 7ddb18bcb6c..01cc5560724 100644 --- a/terraform/modules/baseline_presets/outputs.tf +++ b/terraform/modules/baseline_presets/outputs.tf @@ -44,18 +44,15 @@ output "cloudwatch_metric_alarms" { value = local.cloudwatch_metric_alarms } +output "cloudwatch_metric_alarms_baseline" { + description = "Map of common cloudwatch metric alarms that can be passed into baseline directly as specified by var.options.enable_ssm_command_monitoring for example" + value = local.cloudwatch_metric_alarms_baseline +} + output "cloudwatch_metric_alarms_by_sns_topic" { description = "Map of sns topic key to cloudwatch metric alarms grouped by namespace, where the default action is the sns topic key" - value = { - for sns_key, sns_value in local.sns_topics : sns_key => { - for namespace_key, namespace_value in local.cloudwatch_metric_alarms : namespace_key => { - for alarm_key, alarm_value in namespace_value : alarm_key => merge(alarm_value, { - alarm_actions = [sns_key] - }) - } - } - } + value = local.cloudwatch_metric_alarms_by_sns_topic } output "iam_roles" { diff --git a/terraform/modules/baseline_presets/sns_topics.tf b/terraform/modules/baseline_presets/sns_topics.tf index 89b75eade1a..346a1c9e403 100644 --- a/terraform/modules/baseline_presets/sns_topics.tf +++ b/terraform/modules/baseline_presets/sns_topics.tf @@ -6,8 +6,14 @@ # from the modernisation platform managed pagerduty_integration_keys locals { + + pagerduty_integrations = merge( + var.options.enable_ssm_command_monitoring ? { dso-pipelines-pagerduty = "dso-pipelines" } : {}, + var.options.sns_topics.pagerduty_integrations + ) + sns_topics_pagerduty_integrations = { - for key, value in var.options.sns_topics.pagerduty_integrations : key => { + for key, value in local.pagerduty_integrations : key => { display_name = "Pager duty integration for ${value}" kms_master_key_id = "general" subscriptions = { diff --git a/terraform/modules/baseline_presets/variables.tf b/terraform/modules/baseline_presets/variables.tf index 097391f56d0..41f697eeca3 100644 --- a/terraform/modules/baseline_presets/variables.tf +++ b/terraform/modules/baseline_presets/variables.tf @@ -37,11 +37,12 @@ variable "options" { enable_ec2_session_manager_cloudwatch_logs = optional(bool, false) # create SSM doc and log group for session manager logs enable_ec2_ssm_agent_update = optional(bool, false) # create SSM association for auto-update of SSM agent. 
update-ssm-agent tag needs to be set on EC2s also enable_ec2_user_keypair = optional(bool, false) # create secret and key-pair for ec2-user - enable_observability_platform_monitoring = optional(bool, false) # create role for observability platform monitroing enable_s3_bucket = optional(bool, false) # create s3-bucket S3 bucket for general use enable_s3_db_backup_bucket = optional(bool, false) # create db-backup S3 buckets enable_s3_shared_bucket = optional(bool, false) # create devtest and preprodprod S3 bucket for sharing between accounts enable_s3_software_bucket = optional(bool, false) # create software S3 bucket in test account for image builder/configuration-management + enable_ssm_command_monitoring = optional(bool, false) # create SNS topic and alarms for SSM command monitoring + enable_ssm_missing_metric_monitoring = optional(bool, false) # create alarm if SSM command metrics are missing enable_vmimport = optional(bool, false) # create role for vm imports route53_resolver_rules = optional(map(list(string)), {}) # create route53 resolver rules; list of map keys to filter local.route53_resolver_rules_all iam_service_linked_roles = optional(list(string)) # create iam service linked roles; list of map keys to filter local.iam_service_linked_roles; default is to create all diff --git a/terraform/modules/cloudwatch_dashboard/main.tf b/terraform/modules/cloudwatch_dashboard/main.tf index 367f521427b..91231842174 100644 --- a/terraform/modules/cloudwatch_dashboard/main.tf +++ b/terraform/modules/cloudwatch_dashboard/main.tf @@ -21,6 +21,17 @@ locals { ]) } ] + widget_groups_search_filter_dimension = [ + for widget_group in var.widget_groups : lookup(widget_group, "search_filter_dimension", null) == null ? {} : { + search_filter = join("", [ + lookup(widget_group.search_filter_dimension, "negate", false) ? "NOT " : "", + widget_group.search_filter_dimension.name, + "=(", + join(" OR ", widget_group.search_filter_dimension.values), + ")", + ]) + } + ] widget_groups = [ for i in range(length(var.widget_groups)) : merge(var.widget_groups[i], { @@ -68,10 +79,11 @@ locals { y = (floor(j * local.widget_groups[i].width / 24) * local.widget_groups[i].height) + local.widget_group_y[i] + local.widget_group_header_height[i] }, try(strcontains(local.widget_groups[i].widgets[j].expression, "InstanceId"), false) ? local.widget_groups_search_filter_ec2[i] : {}, + try(strcontains(local.widget_groups[i].widgets[j].expression, local.widget_groups[i].search_filter_dimension.name), false) ? local.widget_groups_search_filter_dimension[i] : {}, local.widget_groups[i].widgets[j], - var.accountId == null ? {} : { + var.accountId == null && lookup(local.widget_groups[i], "accountId", null) == null ? 
{} : { properties = merge(local.widget_groups[i].widgets[j].properties, { - accountId = var.accountId + accountId = coalesce(lookup(local.widget_groups[i], "accountId", null), var.accountId) }) } ) if local.widget_groups[i].widgets[j] != null diff --git a/terraform/modules/cloudwatch_dashboard/variables.tf b/terraform/modules/cloudwatch_dashboard/variables.tf index 725baff1049..da9fb66d5db 100644 --- a/terraform/modules/cloudwatch_dashboard/variables.tf +++ b/terraform/modules/cloudwatch_dashboard/variables.tf @@ -34,6 +34,7 @@ variable "widget_groups" { # header_markdown = optional(string) # include a header text widget if set # width = number # width of each widget, must be divisor of 24 # height = number # height of each widget + # accountId = optional(string) # for monitoring accounts, apply this accountId to all widgets in group # search_filter = optional(object({ # optionally apply filter to each 'expression' widget # negate = bool # negate the filter, e.g. add NOT to the expression # ec2_instance = optional(list(string)) # provide list of EC2 InstanceIds # value = string # }))) # })) + # search_filter_dimension = optional(object({ # optionally apply filter to each 'expression' widget + # negate = optional(bool) # negate the filter, e.g. add NOT to the expression + # name = string # the name of the dimension to filter + # values = list(string) # list of dimension values + # })) # add_ebs_widgets = optional(object({ - # iops = bool  # add additional widgets showing EBS IOPS vs configured max - # throughput = bool # add additional widgets showing EBS thoughput vs configured max + # iops = bool # add additional widgets showing EBS IOPS vs configured max + # throughput = bool # add additional widgets showing EBS throughput vs configured max # })) # widgets = list(any) # as per https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html # NOTE: widget can also use following fields for module only diff --git a/terraform/modules/efs/main.tf b/terraform/modules/efs/main.tf index f9c8f010096..155fb169d7c 100644 --- a/terraform/modules/efs/main.tf +++ b/terraform/modules/efs/main.tf @@ -41,7 +41,7 @@ resource "aws_efs_file_system" "this" { provisioned_throughput_in_mibps = var.file_system.provisioned_throughput_in_mibps throughput_mode = var.file_system.throughput_mode - # annoyingly you have to define each option as separate block + # annoyingly you have to define each option as separate block dynamic "lifecycle_policy" { for_each = var.file_system.lifecycle_policy.transition_to_archive != null ? [var.file_system.lifecycle_policy] : [] content { @@ -66,6 +66,15 @@ resource "aws_efs_file_system" "this" { }) } +# disable automatic backups - use mod platform everything vault instead +resource "aws_efs_backup_policy" "policy" { + file_system_id = aws_efs_file_system.this.id + + backup_policy { + status = "DISABLED" + } +} + data "aws_iam_policy_document" "this" { count = var.policy != null ?
1 : 0 diff --git a/terraform/modules/fargate_graceful_retirement/eventbridge.tf b/terraform/modules/fargate_graceful_retirement/eventbridge.tf index 4310d4894e7..2386c3685ac 100644 --- a/terraform/modules/fargate_graceful_retirement/eventbridge.tf +++ b/terraform/modules/fargate_graceful_retirement/eventbridge.tf @@ -1,5 +1,5 @@ resource "aws_cloudwatch_event_rule" "ecs_restart_rule" { - name = "ecs_task_retirement_rul" + name = "ecs_task_retirement_rule" description = "Rule to catch AWS ECS Task Patching Retirement events" event_pattern = jsonencode({ @@ -122,3 +122,8 @@ resource "aws_iam_policy" "eventbridge_execution_role_policy" { ] }) } + +resource "aws_iam_role_policy_attachment" "eventbridge_execution_role_policy" { + policy_arn = aws_iam_policy.eventbridge_execution_role_policy.arn + role = aws_iam_role.eventbridge_execution_role.name +} diff --git a/terraform/modules/fargate_graceful_retirement/files/ecs_restart/lambda_function.py b/terraform/modules/fargate_graceful_retirement/files/ecs_restart/lambda_function.py index 4e490ba8023..b7703d5fc55 100644 --- a/terraform/modules/fargate_graceful_retirement/files/ecs_restart/lambda_function.py +++ b/terraform/modules/fargate_graceful_retirement/files/ecs_restart/lambda_function.py @@ -1,25 +1,27 @@ import json -import boto3 import os +import boto3 + + def lambda_handler(event, context): print("Event received:", json.dumps(event)) try: # Create an ECS client using boto3 - ecs_client = boto3.client('ecs') + ecs_client = boto3.client("ecs") # Extract the affected entities from the event - affected_entities = event['detail']['affectedEntities'] + affected_entities = event["detail"]["affectedEntities"] # Iterate over each affected entity for entity in affected_entities: # Get the entity value - entity_value = entity.get('entityValue') + entity_value = entity.get("entityValue") if entity_value is not None: # Extract cluster name and service name from the entity value - cluster_name = entity_value.split('|')[0] - service_name = entity_value.split('|')[1] + cluster_name = entity_value.split("|")[0] + service_name = entity_value.split("|")[1] print("Cluster name:", cluster_name) print("Service name:", service_name) @@ -29,22 +31,19 @@ def lambda_handler(event, context): response = ecs_client.update_service( cluster=cluster_name, service=service_name, - forceNewDeployment=True + forceNewDeployment=True, ) - if os.environ.get('DEBUG_LOGGING', False): + if os.environ.get("DEBUG_LOGGING", False): print("[DEBUG] Update service response:", response) else: print("No entity value found in the event") return { - 'statusCode': 200, - 'body': json.dumps('Handled ECS Task Patching Retirement') - 'restarted_services': affected_entities + "statusCode": 200, + "body": json.dumps("Handled ECS Task Patching Retirement"), + "restarted_services": affected_entities, } except Exception as e: print("Error updating service:", e) - return { - 'statusCode': 500, - 'body': json.dumps('Error updating service') - } + return {"statusCode": 500, "body": json.dumps("Error updating service")} diff --git a/terraform/modules/fsx_windows/README.md b/terraform/modules/fsx_windows/README.md index bd2cedf0585..13f8581883d 100644 --- a/terraform/modules/fsx_windows/README.md +++ b/terraform/modules/fsx_windows/README.md @@ -71,7 +71,7 @@ This: module "fsx_windows1" { source = "../../modules/fsx_windows" - preferred_subnet_id = data.aws_subnet.private_subnets_a.id + preferred_availability_zone = "eu-west-2a" deployment_type = "MULTI_AZ_1" name = "fsx_windows1" security_groups = 
[aws_security_group.ec2.id] diff --git a/terraform/modules/ip_addresses/external.tf b/terraform/modules/ip_addresses/external.tf index 58387cb00f3..d75a7732a77 100644 --- a/terraform/modules/ip_addresses/external.tf +++ b/terraform/modules/ip_addresses/external.tf @@ -30,7 +30,11 @@ locals { "49.248.250.6/32" ] serco = [ - "217.22.14.0/24" + "217.22.14.0/24", + "18.135.54.44/32", + "18.175.105.241/32", + "35.177.142.157/32", + "128.77.110.45/32", ] rrp = [ "62.253.83.37/32" diff --git a/terraform/modules/ip_addresses/moj.tf b/terraform/modules/ip_addresses/moj.tf index f880ce0d7c1..0016980e68e 100644 --- a/terraform/modules/ip_addresses/moj.tf +++ b/terraform/modules/ip_addresses/moj.tf @@ -27,7 +27,6 @@ locals { mojo_arkf_internet_egress_exponential_e = "51.149.249.32/29" mojo_arkf_internet_egress_vodafone = "194.33.248.0/29" - ark_dc_external_internet = [ "195.59.75.0/24", "194.33.192.0/25", @@ -42,7 +41,7 @@ locals { "194.33.218.0/24" ] - digital_prisons = [ + mojo_azure_landing_zone_egress = [ "20.49.214.199/32", "20.49.214.228/32", "20.26.11.71/32", @@ -53,7 +52,6 @@ locals { palo_alto_primsa_access_third_party = "128.77.75.0/25" palo_alto_primsa_access_residents = "128.77.75.128/26" - ark_dc_external_internet = [ "195.59.75.0/24", "194.33.192.0/25", @@ -92,6 +90,7 @@ locals { local.moj_cidr.mojo_arkf_internet_egress_exponential_e, local.moj_cidr.mojo_arkf_internet_egress_vodafone, local.moj_cidr.ark_dc_external_internet, + local.moj_cidr.mojo_azure_landing_zone_egress ]) trusted_moj_enduser_internal = [ diff --git a/terraform/modules/schedule_alarms_lambda/main.tf b/terraform/modules/schedule_alarms_lambda/main.tf index 3c56e4dde8f..b35a3c6771f 100644 --- a/terraform/modules/schedule_alarms_lambda/main.tf +++ b/terraform/modules/schedule_alarms_lambda/main.tf @@ -6,6 +6,15 @@ data "archive_file" "lambda_function_payload" { } resource "aws_lambda_function" "alarm_scheduler" { + #checkov:skip=CKV_AWS_50: "X-Ray tracing is enabled for Lambda" - could be implemented but not required + #checkov:skip=CKV_AWS_115: "Ensure that AWS Lambda function is configured for function-level concurrent execution limit" - seems unnecessary for this module, could be added as reserved_concurrent_executions = 100 or similar (smaller) number. + #checkov:skip=CKV_AWS_116: "Ensure that AWS Lambda function is configured for a Dead Letter Queue(DLQ)" - not required + #checkov:skip=CKV_AWS_117: "Ensure that AWS Lambda function is configured inside a VPC" - irrelevant for this module + #checkov:skip=CKV_AWS_158: "Ensure that CloudWatch Log Group is encrypted by KMS" - not required + #checkov:skip=CKV_AWS_173: "Check encryption settings for Lambda environmental variable" - not required + #checkov:skip=CKV_AWS_272: "Ensure AWS Lambda function is configured to validate code-signing" - code signing is not implemented + #checkov:skip=CKV_AWS_338: "Ensure CloudWatch log groups retains logs for at least 1 year" - only 7 days required, see execution_logs below + filename = "${path.module}/lambda/alarm_scheduler.zip" function_name = var.lambda_function_name architectures = ["arm64"] @@ -27,6 +36,8 @@ resource "aws_lambda_function" "alarm_scheduler" { } resource "aws_cloudwatch_log_group" "execution_logs" { + #checkov:skip=CKV_AWS_158: "Ensure that CloudWatch Log Group is encrypted by KMS" - not required + #checkov:skip=CKV_AWS_338: "Ensure CloudWatch log groups retains logs for at least 1 year" - only 7 days required, see below name = format("/aws/lambda/%s", var.lambda_function_name) retention_in_days = 7
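To show how the new baseline_presets options and the cloudwatch_metric_alarms_baseline output added above might be consumed, here is a minimal sketch of an environment's wiring. It assumes the baseline module accepts a cloudwatch_metric_alarms input and that a module.environment helper exists; the module labels and values are illustrative only, not taken from any real environment.

# Hypothetical environment wiring; only the option names come from this change.
module "baseline_presets" {
  source      = "../../modules/baseline_presets"
  environment = module.environment # assumed helper exposing account_name and account_ids
  options = {
    enable_ssm_command_monitoring        = true # failed-ssm-command alarm routed to the dso-pipelines-pagerduty topic
    enable_ssm_missing_metric_monitoring = true # alarm if the SSM command metrics stop being published
  }
}

module "baseline" {
  source      = "../../modules/baseline"
  environment = module.environment
  # assumed input name; merge with any environment-specific alarms as required
  cloudwatch_metric_alarms = module.baseline_presets.cloudwatch_metric_alarms_baseline
}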
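The dashboard changes above add an account_name option per widget group in baseline (resolved to an accountId via var.environment.account_ids), an accountId option in the cloudwatch_dashboard module, and a search_filter_dimension option that is rendered into each widget's SEARCH expression as name=(value1 OR value2). A hypothetical widget group using these options follows; the dashboard key, account name and load balancer value are placeholders.

# Hypothetical cloudwatch_dashboards entry; option keys are from this change, values are placeholders.
cloudwatch_dashboards = {
  "example-nlb-dashboard" = {
    widget_groups = [
      {
        header_markdown = "## Network load balancer"
        width           = 8
        height          = 8
        account_name    = "core-network-services-production" # placeholder; baseline resolves this to an accountId
        search_filter_dimension = {
          name   = "LoadBalancer"                       # dimension name appended to each SEARCH expression
          values = ["net/example-nlb/0123456789abcdef"] # placeholder NLB dimension value
        }
        widgets = [
          # e.g. the new NLB widgets defined in cloudwatch_dashboards.tf above;
          # the exact reference depends on how the environment consumes the presets
        ]
      }
    ]
  }
}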
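The schedule_alarms_lambda variable above now treats start_time as the time matching alarms are disabled and end_time as the time they are re-enabled, with the Lambda name fixed to schedule-alarms. A hypothetical override using only the fields defined on that variable, with a placeholder alarm pattern:

# Hypothetical baseline input; field names come from var.schedule_alarms_lambda above.
schedule_alarms_lambda = {
  lambda_log_level = "INFO"
  alarm_patterns   = ["*-development-*"] # placeholder pattern for alarms to schedule
  disable_weekend  = true
  start_time       = "20:45" # disable matching alarms at this time
  end_time         = "06:15" # re-enable them at this time
}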