diff --git a/README.md b/README.md index 1af150ed..6d1673c6 100644 --- a/README.md +++ b/README.md @@ -59,7 +59,7 @@ To deploy the Vault cluster: If you are just experimenting with this Module, you may find it more convenient to use one of our official public AMIs: - [Latest Ubuntu 16 AMIs](https://github.com/hashicorp/terraform-aws-vault/tree/master/_docs/ubuntu16-ami-list.md). - - [Latest Amazon Linux AMIs](https://github.com/hashicorp/terraform-aws-vault/tree/master/_docs/amazon-linux-ami-list.md). + - [Latest Amazon Linux 2 AMIs](https://github.com/hashicorp/terraform-aws-vault/tree/master/_docs/amazon-linux-ami-list.md). **WARNING! Do NOT use these AMIs in your production setup. In production, you should build your own AMIs in your own AWS account.** diff --git a/examples/vault-auto-unseal/README.md b/examples/vault-auto-unseal/README.md index ba069ead..19bd69de 100644 --- a/examples/vault-auto-unseal/README.md +++ b/examples/vault-auto-unseal/README.md @@ -49,7 +49,7 @@ even if you immediately delete it. 1. SSH to an instance in the Vault cluster and run `vault operator init` to initialize the cluster, then `vault status` to check that it is unsealed. If you SSH to a different node in the cluster, you might have to restart Vault first with - `sudo supervisorctl restart vault` so it will rejoin the cluster and unseal. + `sudo systemctl restart vault.service` so it will rejoin the cluster and unseal. To avoid doing that, you can initially start your cluster with just one node and start the server, then change the `vault_cluster_size` variable back to 3 and run `terraform apply` again. The new nodes will join the cluster already unsealed diff --git a/examples/vault-consul-ami/README.md b/examples/vault-consul-ami/README.md index f294c4f5..813c2a8f 100644 --- a/examples/vault-consul-ami/README.md +++ b/examples/vault-consul-ami/README.md @@ -7,7 +7,7 @@ from the Consul AWS Module with [Packer](https://www.packer.io/) to create [Amaz (AMIs)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) that have Vault and Consul installed on top of: 1. Ubuntu 16.04 -1. Amazon Linux +1. Amazon Linux 2 You can use this AMI to deploy a [Vault cluster](https://www.vaultproject.io/) by using the [vault-cluster module](https://github.com/hashicorp/terraform-aws-vault/tree/master/modules/vault-cluster).
This Vault cluster will use Consul as its storage backend, so you can also use the diff --git a/examples/vault-consul-ami/vault-consul.json b/examples/vault-consul-ami/vault-consul.json index 412d3af0..6cc5dc6a 100644 --- a/examples/vault-consul-ami/vault-consul.json +++ b/examples/vault-consul-ami/vault-consul.json @@ -32,17 +32,17 @@ }, "ssh_username": "ubuntu" },{ - "ami_name": "vault-consul-amazon-linux-{{isotime | clean_ami_name}}-{{uuid}}", - "ami_description": "An Amazon Linux AMI that has Vault and Consul installed.", + "ami_name": "vault-consul-amazon-linux-2-{{isotime | clean_ami_name}}-{{uuid}}", + "ami_description": "An Amazon Linux 2 AMI that has Vault and Consul installed.", "instance_type": "t2.micro", - "name": "amazon-linux-ami", + "name": "amazon-linux-2-ami", "region": "{{user `aws_region`}}", "type": "amazon-ebs", "source_ami_filter": { "filters": { "virtualization-type": "hvm", "architecture": "x86_64", - "name": "*amzn-ami-hvm-*", + "name": "*amzn2-ami-hvm-*", "block-device-mapping.volume-type": "gp2", "root-device-type": "ebs" }, @@ -87,7 +87,7 @@ },{ "type": "shell", "inline": [ - "if [ '{{user `install_auth_signing_script`}}' = 'true' ]; then", + "if [[ '{{user `install_auth_signing_script`}}' == 'true' ]]; then", "sudo mv /tmp/sign-request.py /opt/vault/scripts/", "else", "sudo rm /tmp/sign-request.py", @@ -99,23 +99,29 @@ "sudo chmod -R 600 /opt/vault/tls", "sudo chmod 700 /opt/vault/tls", "sudo /tmp/terraform-aws-vault/modules/update-certificate-store/update-certificate-store --cert-file-path /opt/vault/tls/ca.crt.pem" - ] + ], + "inline_shebang": "/bin/bash -e" },{ "type": "shell", "inline": [ "sudo apt-get install -y git", - "if [ '{{user `install_auth_signing_script`}}' = 'true' ]; then", + "if [[ '{{user `install_auth_signing_script`}}' == 'true' ]]; then", "sudo apt-get install -y python-pip", "LC_ALL=C && sudo pip install boto3", "fi" ], + "inline_shebang": "/bin/bash -e", "only": ["ubuntu16-ami"] },{ "type": "shell", "inline": [ - "sudo yum install -y git" + "sudo yum install -y git", + "if [[ '{{user `install_auth_signing_script`}}' == 'true' ]]; then", + "sudo yum install -y python2-pip", + "LC_ALL=C && sudo pip install boto3", + "fi" ], - "only": ["amazon-linux-ami"] + "only": ["amazon-linux-2-ami"] },{ "type": "shell", "inline": [ diff --git a/modules/install-vault/README.md b/modules/install-vault/README.md index 73b6b5fe..fba01fbe 100644 --- a/modules/install-vault/README.md +++ b/modules/install-vault/README.md @@ -8,7 +8,7 @@ This folder contains a script for installing Vault and its dependencies. You can This script has been tested on the following operating systems: * Ubuntu 16.04 -* Amazon Linux +* Amazon Linux 2 There is a good chance it will work on other flavors of Debian, CentOS, and RHEL as well. @@ -61,7 +61,6 @@ The `install-vault` script does the following: 1. [Creates a user and folders for Vault](#create-a-user-and-folders-for-vault) 1. [Installs Vault binaries and scripts](#install-vault-binaries-and-scripts) 1. [Configures mlock](#configure-mlock) -1. [Installs supervisord](#install-supervisord) 1. [Follow-up tasks](#follow-up-tasks) @@ -94,12 +93,6 @@ Gives Vault permissions to make the `mlock` (memory lock) syscall. This syscall Vault's memory to disk. For more info, see: https://www.vaultproject.io/docs/configuration/#disable_mlock. -### Installs supervisord - -Installs [supervisord](http://supervisord.org/). 
We use it as a cross-platform supervisor to ensure Vault is started -whenever the system boots and restarted if the Vault process crashes. - - ### Follow-up tasks After the `install-vault` script finishes running, you may wish to do the following: @@ -111,6 +104,14 @@ After the `install-vault` script finishes running, you may wish to do the follow +## Dependencies + +The install script assumes that `systemd` is already installed. We use it as a cross-platform supervisor to ensure Vault is started +whenever the system boots and restarted if the Vault process crashes. Additionally, it is used to store all logs which can be accessed +using `journalctl`. + + + ## Why use Git to install this code? We needed an easy way to install these scripts that satisfied a number of requirements, including working on a variety diff --git a/modules/install-vault/install-vault b/modules/install-vault/install-vault index 8582167e..e78aae6b 100755 --- a/modules/install-vault/install-vault +++ b/modules/install-vault/install-vault @@ -3,7 +3,7 @@ # operating systems: # # 1. Ubuntu 16.04 -# 2. Amazon Linux +# 2. Amazon Linux 2 set -e @@ -15,16 +15,13 @@ readonly DOWNLOAD_PACKAGE_PATH="/tmp/vault.zip" readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" readonly SYSTEM_BIN_DIR="/usr/local/bin" -readonly SUPERVISOR_DIR="/etc/supervisor" -readonly SUPERVISOR_CONF_DIR="$SUPERVISOR_DIR/conf.d" - readonly SCRIPT_NAME="$(basename "$0")" function print_usage { echo echo "Usage: install-vault [OPTIONS]" echo - echo "This script can be used to install Vault and its dependencies. This script has been tested with Ubuntu 16.04 and Amazon Linux." + echo "This script can be used to install Vault and its dependencies. This script has been tested with Ubuntu 16.04 and Amazon Linux 2." echo echo "Options:" echo @@ -108,59 +105,6 @@ function retry { exit $exit_status } -# Install steps are based on: http://unix.stackexchange.com/a/291098/215969 -function install_supervisord_debian { - sudo apt-get install -y supervisor - sudo update-rc.d supervisor defaults - - create_supervisor_config - sudo systemctl enable supervisor -} - - -function two_way_symlink() { - local src="$1" - local dest="$2" - - if [[ ! -f "$dest" ]] && [[ ! -f "$src" ]]; then - echo "Missing source '$src' AND destination '$dest' exiting..." - exit -5 - fi - - if [[ ! -f "$dest" ]]; then - ## Destination isn't there point it to source - sudo ln -s $src $dest - elif [[ ! -f "$src" ]]; then - ## Source file was missing, point to destination. Should ONLY do so if it doesn't already exist (e.g. 
hadn't already been dual linked) - sudo ln -s $dest $src - fi - - -} - - -# Install steps are based on: http://stackoverflow.com/a/31576473/483528 -function install_supervisord_amazon_linux { - sudo pip install supervisor - - # On Amazon Linux, /usr/local/bin is not in PATH for the root user, so we add symlinks to /usr/bin, which is in PATH - two_way_symlink "/usr/bin/supervisorctl" "/usr/local/bin/supervisorctl" - two_way_symlink "/usr/bin/supervisord" "/usr/local/bin/supervisord" - - sudo cp "$SCRIPT_DIR/supervisor-initd-script.sh" "/etc/init.d/supervisor" - sudo chmod a+x /etc/init.d/supervisor - sudo mkdir -p /var/log/supervisor - - create_supervisor_config - sudo chkconfig --add supervisor - sudo chkconfig supervisor on -} - -function create_supervisor_config { - sudo mkdir -p "$SUPERVISOR_CONF_DIR" - sudo cp "$SCRIPT_DIR/supervisord.conf" "$SUPERVISOR_DIR/supervisord.conf" -} - function has_yum { [[ -n "$(command -v yum)" ]] } @@ -175,11 +119,9 @@ function install_dependencies { if $(has_apt_get); then sudo apt-get update -y sudo apt-get install -y awscli curl unzip jq - install_supervisord_debian elif $(has_yum); then sudo yum update -y sudo yum install -y awscli curl unzip jq - install_supervisord_amazon_linux else log_error "Could not find apt-get or yum. Cannot install dependencies on this OS." exit 1 @@ -211,7 +153,6 @@ function create_vault_install_paths { sudo mkdir -p "$path/bin" sudo mkdir -p "$path/config" sudo mkdir -p "$path/data" - sudo mkdir -p "$path/log" sudo mkdir -p "$path/tls" sudo mkdir -p "$path/scripts" sudo chmod 755 "$path" diff --git a/modules/install-vault/supervisor-initd-script.sh b/modules/install-vault/supervisor-initd-script.sh deleted file mode 100755 index 171b9161..00000000 --- a/modules/install-vault/supervisor-initd-script.sh +++ /dev/null @@ -1,116 +0,0 @@ -#!/bin/bash -# -# supervisord Startup script for the Supervisor process control system -# -# Author: Mike McGrath (based off yumupdatesd) -# Jason Koppe adjusted to read sysconfig, -# use supervisord tools to start/stop, conditionally wait -# for child processes to shutdown, and startup later -# Erwan Queffelec -# make script LSB-compliant -# -# chkconfig: 345 83 04 -# description: Supervisor is a client/server system that allows \ -# its users to monitor and control a number of processes on \ -# UNIX-like operating systems. -# processname: supervisord -# config: /etc/supervisord.conf -# config: /etc/sysconfig/supervisord -# pidfile: /var/run/supervisord.pid -# -### BEGIN INIT INFO -# Provides: supervisord -# Required-Start: $all -# Required-Stop: $all -# Short-Description: start and stop Supervisor process control system -# Description: Supervisor is a client/server system that allows -# its users to monitor and control a number of processes on -# UNIX-like operating systems. -### END INIT INFO - -# Source function library -. /etc/rc.d/init.d/functions - -# Source system settings -if [ -f /etc/sysconfig/supervisord ]; then - . /etc/sysconfig/supervisord -fi - -# Path to the supervisorctl script, server binary, -# and short-form for messages. -supervisorctl=/usr/local/bin/supervisorctl -supervisord=${SUPERVISORD-/usr/local/bin/supervisord} -prog=supervisord -pidfile=${PIDFILE-/tmp/supervisord.pid} -lockfile=${LOCKFILE-/var/lock/subsys/supervisord} -STOP_TIMEOUT=${STOP_TIMEOUT-60} -OPTIONS="${OPTIONS--c /etc/supervisor/supervisord.conf}" -RETVAL=0 - -start() { - echo -n $"Starting $prog: " - daemon --pidfile=${pidfile} $supervisord $OPTIONS - RETVAL=$? 
- echo - if [ $RETVAL -eq 0 ]; then - touch ${lockfile} - $supervisorctl $OPTIONS status - fi - return $RETVAL -} - -stop() { - echo -n $"Stopping $prog: " - killproc -p ${pidfile} -d ${STOP_TIMEOUT} $supervisord - RETVAL=$? - echo - [ $RETVAL -eq 0 ] && rm -rf ${lockfile} ${pidfile} -} - -reload() { - echo -n $"Reloading $prog: " - LSB=1 killproc -p $pidfile $supervisord -HUP - RETVAL=$? - echo - if [ $RETVAL -eq 7 ]; then - failure $"$prog reload" - else - $supervisorctl $OPTIONS status - fi -} - -restart() { - stop - start -} - -case "$1" in - start) - start - ;; - stop) - stop - ;; - status) - status -p ${pidfile} $supervisord - RETVAL=$? - [ $RETVAL -eq 0 ] && $supervisorctl $OPTIONS status - ;; - restart) - restart - ;; - condrestart|try-restart) - if status -p ${pidfile} $supervisord >&/dev/null; then - stop - start - fi - ;; - force-reload|reload) - reload - ;; - *) - echo $"Usage: $prog {start|stop|restart|condrestart|try-restart|force-reload|reload}" - RETVAL=2 - esac - - exit $RETVAL \ No newline at end of file diff --git a/modules/install-vault/supervisord.conf b/modules/install-vault/supervisord.conf deleted file mode 100644 index d96beb0c..00000000 --- a/modules/install-vault/supervisord.conf +++ /dev/null @@ -1,39 +0,0 @@ -; supervisor config file -; -; For more information on the config file, please see: -; http://supervisord.org/configuration.html -; -; Notes: -; - Shell expansion ("~" or "$HOME") is not supported. Environment -; variables can be expanded using this syntax: "%(ENV_HOME)s". -; - Comments must have a leading space: "a=b ;comment" not "a=b;comment". - -[unix_http_server] -file=/var/run/supervisor.sock ; (the path to the socket file) -chmod=0700 ; sockef file mode (default 0700) - -[supervisord] -logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log) -pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid) -childlogdir=/var/log/supervisor ; ('AUTO' child log dir, default $TEMP) -logfile_maxbytes=50MB ; (max main logfile bytes b4 rotation;default 50MB) -logfile_backups=10 ; (num of main logfile rotation backups;default 10) -loglevel=info ; (log level;default info; others: debug,warn,trace) - -; the below section must remain in the config file for RPC -; (supervisorctl/web interface) to work, additional interfaces may be -; added by defining them in separate rpcinterface: sections -[rpcinterface:supervisor] -supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface - -[supervisorctl] -serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL for a unix socket - -; The [include] section can just contain the "files" setting. This -; setting can list multiple files (separated by whitespace or -; newlines). It can also contain wildcards. The filenames are -; interpreted as relative to this file. Included files *cannot* -; include files themselves. - -[include] -files = /etc/supervisor/conf.d/*.conf diff --git a/modules/run-vault/README.md b/modules/run-vault/README.md index af36429f..b4a1470c 100644 --- a/modules/run-vault/README.md +++ b/modules/run-vault/README.md @@ -4,7 +4,7 @@ This folder contains a script for configuring and running Vault on an [AWS](http script has been tested on the following operating systems: * Ubuntu 16.04 -* Amazon Linux +* Amazon Linux 2 There is a good chance it will work on other flavors of Debian, CentOS, and RHEL as well. 
@@ -27,17 +27,23 @@ This will: See [Vault configuration](#vault-configuration) for details on what this configuration file will contain and how to override it with your own configuration. -1. Generate a [Supervisor](http://supervisord.org/) configuration file called `run-vault.conf` in the Supervisor - config dir (default: `/etc/supervisor/conf.d`) with a command that will run Vault: +1. Generate a [systemd](https://www.freedesktop.org/wiki/Software/systemd/) service file called `vault.service` in the systemd + config dir (default: `/etc/systemd/system`) with a command that will run Vault: `vault server -config=/opt/vault/config`. -1. Tell Supervisor to load the new configuration file, thereby starting Vault. +1. Tell systemd to load the new configuration file, thereby starting Vault. We recommend using the `run-vault` command as part of [User Data](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html#user-data-shell-scripts), so that it executes -when the EC2 Instance is first booting. After running `run-vault` on that initial boot, the `supervisord` configuration +when the EC2 Instance is first booting. After running `run-vault` on that initial boot, the `systemd` configuration will automatically restart Vault if it crashes or the EC2 instance reboots. +Note that `systemd` logs to its own journal by default. To view the Vault logs, run `journalctl -u vault.service`. To change +the log output location, you can specify the `StandardOutput` and `StandardError` options by using the `--systemd-stdout` and `--systemd-stderr` +options. See the [`systemd.exec` man pages](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#StandardOutput=) for available +options, but note that the `file:path` option requires [systemd version >= 236](https://stackoverflow.com/a/48052152), which is not provided +in the base Ubuntu 16.04 and Amazon Linux 2 images. + See the [vault-cluster-public](https://github.com/hashicorp/terraform-aws-vault/tree/master/examples/vault-cluster-public) and [vault-cluster-private](https://github.com/hashicorp/terraform-aws-vault/tree/master/examples/vault-cluster-private) examples for fully-working sample code. @@ -55,6 +61,8 @@ The `run-vault` script accepts the following arguments: encryption?](#how-do-you_handle-encryption) for more info. * `--port` (optional): The port Vault should listen on. Default is `8200`. * `--log-level` (optional): The log verbosity to use with Vault. Default is `info`. +* `--systemd-stdout` (optional): The StandardOutput option of the systemd unit. If not specified, it will use systemd's default (journal). +* `--systemd-stderr` (optional): The StandardError option of the systemd unit. If not specified, it will use systemd's default (inherit). * `--cluster-port` (optional): The port Vault should listen on for server-to-server communication. Default is `--port + 1`. * `--api-addr`: The full address to use for [Client Redirection](https://www.vaultproject.io/docs/concepts/ha.html#client-redirection) when running Vault in HA mode. Defaults to "https://[instance_ip]:8200". Optional. 
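As a usage sketch of the options described above (illustrative only: the install path of `run-vault` and the TLS flags/file paths are assumptions based on the module's usual layout, not part of this diff), you could point the unit's output at syslog instead of the journal, or simply follow the journal on an instance:

```bash
# Assumed install path for run-vault; adjust to wherever install-vault placed it.
# --systemd-stdout / --systemd-stderr take standard systemd StandardOutput/StandardError
# values such as "journal" or "syslog" (the "file:path" form needs systemd >= 236).
/opt/vault/bin/run-vault \
  --tls-cert-file /opt/vault/tls/vault.crt.pem \
  --tls-key-file /opt/vault/tls/vault.key.pem \
  --systemd-stdout syslog \
  --systemd-stderr syslog

# With the defaults (journal / inherit), follow Vault's logs via journalctl:
journalctl -u vault.service -f
```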
diff --git a/modules/run-vault/run-vault b/modules/run-vault/run-vault index 40a75d54..451e1b89 100755 --- a/modules/run-vault/run-vault +++ b/modules/run-vault/run-vault @@ -4,7 +4,7 @@ set -e readonly VAULT_CONFIG_FILE="default.hcl" -readonly SUPERVISOR_CONFIG_PATH="/etc/supervisor/conf.d/run-vault.conf" +readonly SYSTEMD_CONFIG_PATH="/etc/systemd/system/vault.service" readonly DEFAULT_PORT=8200 readonly DEFAULT_LOG_LEVEL="info" @@ -29,8 +29,9 @@ function print_usage { echo -e " --api-addr\t\tThe full address to use for Client Redirection when running Vault in HA mode. Defaults to \"https://[instance_ip]:$DEFAULT_PORT\". Optional." echo -e " --config-dir\t\tThe path to the Vault config folder. Optional. Default is the absolute path of '../config', relative to this script." echo -e " --bin-dir\t\tThe path to the folder with Vault binary. Optional. Default is the absolute path of the parent folder of this script." - echo -e " --log-dir\t\tThe path to the Vault log folder. Optional. Default is the absolute path of '../log', relative to this script." echo -e " --log-level\t\tThe log verbosity to use with Vault. Optional. Default is $DEFAULT_LOG_LEVEL." + echo -e " --systemd-stdout\t\tThe StandardOutput option of the systemd unit. Optional. If not configured, uses systemd's default (journal)." + echo -e " --systemd-stderr\t\tThe StandardError option of the systemd unit. Optional. If not configured, uses systemd's default (inherit)." echo -e " --user\t\tThe user to run Vault as. Optional. Default is to use the owner of --config-dir." echo -e " --skip-vault-config\tIf this flag is set, don't generate a Vault configuration file. Optional. Default is false." echo -e " --enable-s3-backend\tIf this flag is set, an S3 backend will be enabled in addition to the HA Consul backend. Default is false." 
@@ -226,32 +227,79 @@ EOF chown "$user:$user" "$config_path" } -function generate_supervisor_config { - local -r supervisor_config_path="$1" +function generate_systemd_config { + local -r systemd_config_path="$1" local -r vault_config_dir="$2" local -r vault_bin_dir="$3" - local -r vault_log_dir="$4" - local -r vault_log_level="$5" - local -r vault_user="$6" - - log_info "Creating Supervisor config file to run Vault in $supervisor_config_path" - cat > "$supervisor_config_path" < "$systemd_config_path" + echo -e "$service_config" >> "$systemd_config_path" + echo -e "$log_config" >> "$systemd_config_path" + echo -e "$install_config" >> "$systemd_config_path" } function start_vault { - log_info "Reloading Supervisor config and starting Vault" - supervisorctl reread - supervisorctl update + log_info "Reloading systemd config and starting Vault" + sudo systemctl daemon-reload + sudo systemctl restart vault.service } # Based on: http://unix.stackexchange.com/a/7732/215969 @@ -268,8 +316,9 @@ function run { local api_addr="" local config_dir="" local bin_dir="" - local log_dir="" local log_level="$DEFAULT_LOG_LEVEL" + local systemd_stdout="" + local systemd_stderr="" local user="" local skip_vault_config="false" local enable_s3_backend="false" @@ -318,14 +367,19 @@ function run { bin_dir="$2" shift ;; - --log-dir) + --log-level) assert_not_empty "$key" "$2" - log_dir="$2" + log_level="$2" shift ;; - --log-level) + --systemd-stdout) assert_not_empty "$key" "$2" - log_level="$2" + systemd_stdout="$2" + shift + ;; + --systemd-stderr) + assert_not_empty "$key" "$2" + systemd_stderr="$2" shift ;; --user) @@ -384,7 +438,7 @@ function run { assert_not_empty "--s3-bucket-region" "$s3_bucket_region" fi - assert_is_installed "supervisorctl" + assert_is_installed "systemctl" assert_is_installed "aws" assert_is_installed "curl" assert_is_installed "jq" @@ -393,14 +447,12 @@ function run { config_dir=$(cd "$SCRIPT_DIR/../config" && pwd) fi + # If $systemd_stdout and/or $systemd_stderr are empty, we leave them empty so that generate_systemd_config will use systemd's defaults (journal and inherit, respectively) + if [[ -z "$bin_dir" ]]; then bin_dir=$(cd "$SCRIPT_DIR/../bin" && pwd) fi - if [[ -z "$log_dir" ]]; then - log_dir=$(cd "$SCRIPT_DIR/../log" && pwd) - fi - if [[ -z "$user" ]]; then user=$(get_owner_of_path "$config_dir") fi @@ -439,7 +491,7 @@ function run { "$auto_unseal_endpoint" fi - generate_supervisor_config "$SUPERVISOR_CONFIG_PATH" "$config_dir" "$bin_dir" "$log_dir" "$log_level" "$user" + generate_systemd_config "$SYSTEMD_CONFIG_PATH" "$config_dir" "$bin_dir" "$log_level" "$systemd_stdout" "$systemd_stderr" "$user" start_vault } diff --git a/modules/update-certificate-store/README.md b/modules/update-certificate-store/README.md index 81745cfb..52a32c0c 100644 --- a/modules/update-certificate-store/README.md +++ b/modules/update-certificate-store/README.md @@ -5,7 +5,7 @@ store. This allows you to establish TLS connections to services that use TLS cer x509 certificate errors. This script has been tested on the following operating systems: * Ubuntu 16.04 -* Amazon Linux +* Amazon Linux 2 There is a good chance it will work on other flavors of Debian, CentOS, and RHEL as well. 
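The full unit file assembled by `generate_systemd_config` above is elided from this hunk; as a rough sketch of the end result (directive values and the `vault` user are illustrative assumptions, not the module's verbatim output), the script writes something along these lines to `/etc/systemd/system/vault.service` and then reloads systemd, which is what `start_vault` does:

```bash
# Illustrative only: a minimal vault.service of the general shape generate_systemd_config produces.
sudo tee /etc/systemd/system/vault.service > /dev/null <<'EOF'
[Unit]
Description=HashiCorp Vault
Requires=network-online.target
After=network-online.target

[Service]
User=vault
ExecStart=/opt/vault/bin/vault server -config=/opt/vault/config -log-level=info
Restart=on-failure
# StandardOutput/StandardError are only set when --systemd-stdout/--systemd-stderr are passed.

[Install]
WantedBy=multi-user.target
EOF

# Equivalent of start_vault: pick up the new unit and (re)start Vault.
sudo systemctl daemon-reload
sudo systemctl restart vault.service
```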
diff --git a/modules/update-certificate-store/update-certificate-store b/modules/update-certificate-store/update-certificate-store index 4311284f..a803b824 100755 --- a/modules/update-certificate-store/update-certificate-store +++ b/modules/update-certificate-store/update-certificate-store @@ -4,7 +4,7 @@ # This script has been tested with the following operating systems: # # 1. Ubuntu 16.04 -# 2. Amazon Linux +# 2. Amazon Linux 2 set -e @@ -19,7 +19,7 @@ function print_usage { echo echo "Usage: update-certificate-store [OPTIONS]" echo - echo "Add a trusted, private CA certificate to an OS's certificate store. This script has been tested with Ubuntu 16.04 and Amazon Linux." + echo "Add a trusted, private CA certificate to an OS's certificate store. This script has been tested with Ubuntu 16.04 and Amazon Linux 2." echo echo "Options:" echo diff --git a/modules/vault-cluster/README.md b/modules/vault-cluster/README.md index bc20ca1f..208f5b3b 100644 --- a/modules/vault-cluster/README.md +++ b/modules/vault-cluster/README.md @@ -340,7 +340,7 @@ NOT actually deploy those new instances. To make that happen, you need to: For each of the standby nodes: 1. SSH to the EC2 Instance where the Vault standby is running. -1. Execute `sudo supervisorctl stop vault` to have Vault shut down gracefully. +1. Execute `sudo systemctl stop vault` to have Vault shut down gracefully. 1. Terminate the EC2 Instance. 1. After a minute or two, the ASG should automatically launch a new Instance, with the new AMI, to replace the old one. 1. Have each Vault admin SSH to the new EC2 Instance and unseal it. @@ -353,7 +353,7 @@ upgraded: 1. SSH to the EC2 Instance where the Vault primary is running. This should be the last server that has the old version of your AMI. -1. Execute `sudo supervisorctl stop vault` to have Vault shut down gracefully. +1. Execute `sudo systemctl stop vault` to have Vault shut down gracefully. 1. Terminate the EC2 Instance. 1. After a minute or two, the ASG should automatically launch a new Instance, with the new AMI, to replace the old one. 1. Have each Vault admin SSH to the new EC2 Instance and unseal it. @@ -366,7 +366,7 @@ upgraded: There are two ways a Vault node may go down: -1. The Vault process may crash. In that case, `supervisor` should restart it automatically. At this point, you will +1. The Vault process may crash. In that case, `systemd` should restart it automatically. At this point, you will need to have each Vault admin SSH to the Instance to unseal it again. 1. The EC2 Instance running Vault dies. In that case, the Auto Scaling Group should launch a replacement automatically. Once again, the Vault admins will have to SSH to the replacement Instance and unseal it. 
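Putting the upgrade steps above together, the per-node commands look roughly like this (illustrative sketch; terminating the instance and distributing unseal keys still happen out of band as described):

```bash
# On the node being replaced: stop Vault gracefully before terminating the instance.
sudo systemctl stop vault
sudo systemctl status vault.service --no-pager   # confirm the unit is inactive

# After the ASG launches the replacement instance, each Vault admin SSHes in and unseals it:
vault operator unseal
```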
diff --git a/test/vault_helpers.go b/test/vault_helpers.go index bf48b857..68cf6261 100644 --- a/test/vault_helpers.go +++ b/test/vault_helpers.go @@ -41,8 +41,7 @@ const VAULT_CLUSTER_PUBLIC_OUTPUT_ELB_DNS_NAME = "vault_elb_dns_name" var UnsealKeyRegex = regexp.MustCompile("^Unseal Key \\d: (.+)$") -const vaultStdOutLogFilePath = "/opt/vault/log/vault-stdout.log" -const vaultStdErrLogFilePath = "/opt/vault/log/vault-error.log" +const vaultLogFilePath = "/opt/vault/log/vault-journalctl.log" const vaultSyslogPathUbuntu = "/var/log/syslog" const vaultSyslogPathAmazonLinux = "/var/log/messages" const vaultClusterSizeInExamples = 3 @@ -124,6 +123,8 @@ func deployCluster(t *testing.T, amiId string, awsRegion string, examplesDir str } func getVaultLogs(t *testing.T, testId string, terraformOptions *terraform.Options, amiId string, awsRegion string, sshUserName string, keyPair *aws.Ec2Keypair) { + writeOutVaultLogs(t, OUTPUT_VAULT_CLUSTER_ASG_NAME, sshUserName, terraformOptions, awsRegion, keyPair) + asgName := terraform.OutputRequired(t, terraformOptions, OUTPUT_VAULT_CLUSTER_ASG_NAME) sysLogPath := vaultSyslogPathUbuntu @@ -131,13 +132,12 @@ func getVaultLogs(t *testing.T, testId string, terraformOptions *terraform.Optio sysLogPath = vaultSyslogPathAmazonLinux } - instanceIdToFilePathToContents := aws.FetchContentsOfFilesFromAsg(t, awsRegion, sshUserName, keyPair, asgName, true, vaultStdOutLogFilePath, vaultStdErrLogFilePath, sysLogPath) + instanceIdToFilePathToContents := aws.FetchContentsOfFilesFromAsg(t, awsRegion, sshUserName, keyPair, asgName, true, vaultLogFilePath, sysLogPath) require.Len(t, instanceIdToFilePathToContents, vaultClusterSizeInExamples) for instanceID, filePathToContents := range instanceIdToFilePathToContents { - require.Contains(t, filePathToContents, vaultStdOutLogFilePath) - require.Contains(t, filePathToContents, vaultStdErrLogFilePath) + require.Contains(t, filePathToContents, vaultLogFilePath) require.Contains(t, filePathToContents, sysLogPath) localDestDir := filepath.Join("/tmp/logs/", testId, amiId, instanceID) @@ -145,12 +145,24 @@ func getVaultLogs(t *testing.T, testId string, terraformOptions *terraform.Optio os.MkdirAll(localDestDir, 0755) } - writeLogFile(t, filePathToContents[vaultStdOutLogFilePath], filepath.Join(localDestDir, "vaultStdOut.log")) - writeLogFile(t, filePathToContents[vaultStdErrLogFilePath], filepath.Join(localDestDir, "vaultStdErr.log")) + writeLogFile(t, filePathToContents[vaultLogFilePath], filepath.Join(localDestDir, "vault-journalctl.log")) writeLogFile(t, filePathToContents[sysLogPath], filepath.Join(localDestDir, "syslog.log")) } } +// Write out the Vault logs from journalctl into a file. This is mainly used for debugging purposes. 
+func writeOutVaultLogs(t *testing.T, asgNameOutputVar string, sshUserName string, terraformOptions *terraform.Options, awsRegion string, keyPair *aws.Ec2Keypair) { + cluster := findVaultClusterNodes(t, asgNameOutputVar, sshUserName, terraformOptions, awsRegion, keyPair) + + for _, node := range cluster.Nodes() { + output := retry.DoWithRetry(t, "Writing out Vault logs from journalctl to file", 1, 10*time.Second, func() (string, error) { + return ssh.CheckSshCommandE(t, node, fmt.Sprintf("sudo -u vault mkdir -p /opt/vault/log && journalctl -u vault.service | sudo -u vault tee %s > /dev/null", vaultLogFilePath)) + }) + logger.Logf(t, "Output from journalctl command on %s: %s", node.Hostname, output) + } + +} + // Initialize the Vault cluster and unseal each of the nodes by connecting to them over SSH and executing Vault // commands. The reason we use SSH rather than using the Vault client remotely is we want to verify that the // self-signed TLS certificate is properly configured on each server so when you're on that server, you don't @@ -247,7 +259,7 @@ func initializeVault(t *testing.T, vaultCluster *VaultCluster) { func restartVault(t *testing.T, host ssh.Host) { description := fmt.Sprintf("Restarting vault on host %s", host.Hostname) retry.DoWithRetry(t, description, 10, 10*time.Second, func() (string, error) { - return ssh.CheckSshCommandE(t, host, "sudo supervisorctl restart vault") + return ssh.CheckSshCommandE(t, host, "sudo systemctl restart vault.service") }) } diff --git a/test/vault_main_test.go b/test/vault_main_test.go index 560bac6b..629d5ee1 100644 --- a/test/vault_main_test.go +++ b/test/vault_main_test.go @@ -26,9 +26,9 @@ type amiData struct { var amisData = []amiData{ {"vaultEnterpriseUbuntu", "ubuntu16-ami", "ubuntu", true}, - {"vaultEnterpriseAmazonLinux", "amazon-linux-ami", "ec2-user", true}, + {"vaultEnterpriseAmazonLinux", "amazon-linux-2-ami", "ec2-user", true}, {"vaultUbuntu", "ubuntu16-ami", "ubuntu", false}, - {"vaultAmazonLinux", "amazon-linux-ami", "ec2-user", false}, + {"vaultAmazonLinux", "amazon-linux-2-ami", "ec2-user", false}, } var testCases = []testCase{