From 35c1402de36244671e3beb74a8f8f13acc2e8e4e Mon Sep 17 00:00:00 2001 From: Jusong Yu Date: Fri, 8 Sep 2023 11:34:35 +0200 Subject: [PATCH 01/13] Improved Docker images (#6080) The current Docker image provided with `aiida-core` depends on `aiida-prerequisites` as a base image. This image is maintained outside of the `aiida-core` repo, making it additional maintenance to keep it up to date when a new `aiida-core` version is released. In addition, the `aiida-prerequisites` image is no longer maintained because the AiiDAlab stack now depends on another base image. Finally, the `aiida-prerequisites` design had shortcomings as to how the required services, PostgreSQL and RabbitMQ, are handled. They had to be started manually and were not cleanly stopped on container shutdown. An AEP was submitted to add two Docker images to `aiida-core` that simplifies their maintenance and that improve the usability by properly and automatically handling the services. See the AEP for more details: https://aep.readthedocs.io/en/latest/009_improved_docker_images/readme.html Cherry-pick: 9e808347b605897c5ff87b854e35144a6a1adbdb --- .docker/README.md | 21 +++ .docker/aiida-core-base/Dockerfile | 174 ++++++++++++++++++ .docker/aiida-core-base/fix-permissions | 35 ++++ .docker/aiida-core-base/initial-condarc | 6 + .../s6-assets/config-quick-setup.yaml | 15 ++ .../s6-assets/init/aiida-prepare.sh} | 80 ++++---- .../dependencies.d/aiida-prepare | 0 .../aiida-daemon-start/dependencies.d/base | 0 .../s6-assets/s6-rc.d/aiida-daemon-start/down | 1 + .../s6-rc.d/aiida-daemon-start/timeout-up | 1 + .../s6-assets/s6-rc.d/aiida-daemon-start/type | 1 + .../s6-assets/s6-rc.d/aiida-daemon-start/up | 3 + .../s6-rc.d/aiida-prepare/dependencies.d/base | 0 .../s6-rc.d/aiida-prepare/timeout-up | 1 + .../s6-assets/s6-rc.d/aiida-prepare/type | 1 + .../s6-assets/s6-rc.d/aiida-prepare/up | 4 + .../user/contents.d/aiida-daemon-start | 0 .../s6-rc.d/user/contents.d/aiida-prepare | 0 .docker/aiida-core-with-services/Dockerfile | 42 +++++ .../s6-assets/config-quick-setup.yaml | 3 + .../s6-assets/init/postgresql-init.sh | 24 +++ .../s6-assets/init/postgresql-prepare.sh | 8 + .../s6-assets/init/rabbitmq-init.sh | 25 +++ .../s6-rc.d/aiida-prepare/dependencies.d/base | 0 .../aiida-prepare/dependencies.d/postgresql | 0 .../dependencies.d/postgresql-prepare | 0 .../postgresql-init/dependencies.d/base | 0 .../s6-rc.d/postgresql-init/timeout-up | 1 + .../s6-assets/s6-rc.d/postgresql-init/type | 1 + .../s6-assets/s6-rc.d/postgresql-init/up | 6 + .../postgresql-prepare/dependencies.d/base | 0 .../s6-rc.d/postgresql-prepare/timeout-up | 1 + .../s6-assets/s6-rc.d/postgresql-prepare/type | 1 + .../s6-assets/s6-rc.d/postgresql-prepare/up | 6 + .../s6-rc.d/postgresql/dependencies.d/base | 0 .../postgresql/dependencies.d/postgresql-init | 0 .../s6-assets/s6-rc.d/postgresql/down | 1 + .../s6-assets/s6-rc.d/postgresql/timeout-up | 1 + .../s6-assets/s6-rc.d/postgresql/type | 1 + .../s6-assets/s6-rc.d/postgresql/up | 5 + .../s6-rc.d/rabbitmq-init/dependencies.d/base | 0 .../s6-rc.d/rabbitmq-init/timeout-up | 1 + .../s6-assets/s6-rc.d/rabbitmq-init/type | 1 + .../s6-assets/s6-rc.d/rabbitmq-init/up | 5 + .../s6-rc.d/rabbitmq/dependencies.d/base | 0 .../rabbitmq/dependencies.d/rabbitmq-init | 0 .../s6-assets/s6-rc.d/rabbitmq/down-signal | 1 + .../s6-assets/s6-rc.d/rabbitmq/run | 6 + .../s6-assets/s6-rc.d/rabbitmq/type | 1 + .../s6-rc.d/user/contents.d/aiida-prepare | 0 .../s6-rc.d/user/contents.d/postgresql | 0 .../s6-rc.d/user/contents.d/postgresql-init | 0 
.../user/contents.d/postgresql-prepare | 0 .../s6-rc.d/user/contents.d/rabbitmq | 0 .../s6-rc.d/user/contents.d/rabbitmq-init | 0 .docker/build.json | 13 ++ .docker/docker-bake.hcl | 67 +++++++ .docker/docker-compose.aiida-core-base.yml | 49 +++++ ...ocker-compose.aiida-core-with-services.yml | 15 ++ .docker/docker-rabbitmq.yml | 34 ---- .docker/my_init.d/configure-aiida.sh | 4 - .docker/pytest.ini | 5 + .docker/requirements.txt | 8 + .docker/tests/conftest.py | 61 ++++++ .docker/tests/test_aiida.py | 32 ++++ .dockerignore | 13 -- .github/actions/create-dev-env/action.yml | 27 +++ .github/actions/load-image/action.yml | 31 ++++ .../workflows/build_and_test_docker_on_pr.yml | 59 ------ .../workflows/docker-build-test-upload.yml | 64 +++++++ .github/workflows/docker-merge-tags.yml | 66 +++++++ .github/workflows/docker-push.yml | 96 ++++++++++ .github/workflows/docker.yml | 101 ++++++++++ .github/workflows/push_image_to_dockerhub.yml | 54 ------ .gitignore | 3 + .pre-commit-config.yaml | 9 +- Dockerfile | 23 --- docs/source/intro/run_docker.rst | 50 ++--- pyproject.toml | 2 +- 79 files changed, 1114 insertions(+), 256 deletions(-) create mode 100644 .docker/README.md create mode 100644 .docker/aiida-core-base/Dockerfile create mode 100644 .docker/aiida-core-base/fix-permissions create mode 100644 .docker/aiida-core-base/initial-condarc create mode 100644 .docker/aiida-core-base/s6-assets/config-quick-setup.yaml rename .docker/{opt/configure-aiida.sh => aiida-core-base/s6-assets/init/aiida-prepare.sh} (56%) create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/aiida-prepare create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/base create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/down create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/timeout-up create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/type create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/up create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/base create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/timeout-up create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/type create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/up create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/user/contents.d/aiida-daemon-start create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/user/contents.d/aiida-prepare create mode 100644 .docker/aiida-core-with-services/Dockerfile create mode 100644 .docker/aiida-core-with-services/s6-assets/config-quick-setup.yaml create mode 100755 .docker/aiida-core-with-services/s6-assets/init/postgresql-init.sh create mode 100755 .docker/aiida-core-with-services/s6-assets/init/postgresql-prepare.sh create mode 100755 .docker/aiida-core-with-services/s6-assets/init/rabbitmq-init.sh create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/base create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/postgresql create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/postgresql-prepare create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/dependencies.d/base create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/timeout-up 
create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/type create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/up create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/dependencies.d/base create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/timeout-up create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/type create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/up create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/dependencies.d/base create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/dependencies.d/postgresql-init create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/down create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/timeout-up create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/type create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/up create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/dependencies.d/base create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/timeout-up create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/type create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/up create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/dependencies.d/base create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/dependencies.d/rabbitmq-init create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/down-signal create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/run create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/type create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/aiida-prepare create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql-init create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql-prepare create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/rabbitmq create mode 100644 .docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/rabbitmq-init create mode 100644 .docker/build.json create mode 100644 .docker/docker-bake.hcl create mode 100644 .docker/docker-compose.aiida-core-base.yml create mode 100644 .docker/docker-compose.aiida-core-with-services.yml delete mode 100644 .docker/docker-rabbitmq.yml delete mode 100755 .docker/my_init.d/configure-aiida.sh create mode 100644 .docker/pytest.ini create mode 100644 .docker/requirements.txt create mode 100644 .docker/tests/conftest.py create mode 100644 .docker/tests/test_aiida.py delete mode 100644 .dockerignore create mode 100644 .github/actions/create-dev-env/action.yml create mode 100644 .github/actions/load-image/action.yml delete mode 100644 .github/workflows/build_and_test_docker_on_pr.yml create mode 100644 .github/workflows/docker-build-test-upload.yml create mode 100644 .github/workflows/docker-merge-tags.yml create mode 100644 .github/workflows/docker-push.yml create mode 100644 .github/workflows/docker.yml delete mode 100644 
.github/workflows/push_image_to_dockerhub.yml delete mode 100644 Dockerfile diff --git a/.docker/README.md b/.docker/README.md new file mode 100644 index 0000000000..c3a8f2caf1 --- /dev/null +++ b/.docker/README.md @@ -0,0 +1,21 @@ +# AiiDA docker stacks + +### Build images locally + +To build the images, run `docker buildx bake -f build.json -f docker-bake.hcl --load` (tested with *docker buildx* version v0.8.2). + +The build system will attempt to detect the local architecture and automatically build images for it (tested with amd64 and arm64). +You can also specify a custom platform with the `--platform` option, for example: `docker buildx bake -f build.json -f docker-bake.hcl --set *.platform=linux/amd64 --load`. + +### Test the built images locally + +Run + +```bash +TAG=newly-baked python -m pytest -s tests +``` + +### Trigger a build on ghcr.io and dockerhub + +Only PRs opened against the organization repository will trigger a build on ghcr.io. +A push to Docker Hub is triggered when making a release on GitHub. diff --git a/.docker/aiida-core-base/Dockerfile b/.docker/aiida-core-base/Dockerfile new file mode 100644 index 0000000000..17307203ec --- /dev/null +++ b/.docker/aiida-core-base/Dockerfile @@ -0,0 +1,174 @@ +# syntax=docker/dockerfile:1 + +# Inspired by jupyter's docker-stacks-foundation image: +# https://github.com/jupyter/docker-stacks/blob/main/docker-stacks-foundation/Dockerfile + +ARG BASE=ubuntu:22.04 + +FROM $BASE + +LABEL maintainer="AiiDA Team " + +ARG SYSTEM_USER="aiida" +ARG SYSTEM_UID="1000" +ARG SYSTEM_GID="100" + + +# Fix: https://github.com/hadolint/hadolint/wiki/DL4006 +# Fix: https://github.com/koalaman/shellcheck/wiki/SC3014 +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +USER root + +ENV SYSTEM_USER="${SYSTEM_USER}" + +# Install all OS dependencies for notebook server that starts but lacks all +# features (e.g., download as all possible file formats) +ENV DEBIAN_FRONTEND noninteractive +RUN apt-get update --yes && \ + # - apt-get upgrade is run to patch known vulnerabilities in apt-get packages as + # the ubuntu base image is rebuilt too seldom sometimes (less than once a month) + apt-get upgrade --yes && \ + apt-get install --yes --no-install-recommends \ + # - bzip2 is necessary to extract the micromamba executable. + bzip2 \ + # - xz-utils is necessary to extract the s6-overlay. + xz-utils \ + ca-certificates \ + locales \ + sudo \ + # development tools + git \ + openssh-client \ + vim \ + # the gcc compiler, needed to build some python packages, e.g.
psutil and pymatgen + build-essential \ + wget && \ + apt-get clean && rm -rf /var/lib/apt/lists/* && \ + echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && \ + locale-gen + +# Install s6-overlay to handle startup and shutdown of services +ARG S6_OVERLAY_VERSION=3.1.5.0 +RUN wget --progress=dot:giga -O /tmp/s6-overlay-noarch.tar.xz \ + "https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz" && \ + tar -C / -Jxpf /tmp/s6-overlay-noarch.tar.xz && \ + rm /tmp/s6-overlay-noarch.tar.xz + +RUN set -x && \ + arch=$(uname -m) && \ + wget --progress=dot:giga -O /tmp/s6-overlay-binary.tar.xz \ + "https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${arch}.tar.xz" && \ + tar -C / -Jxpf /tmp/s6-overlay-binary.tar.xz && \ + rm /tmp/s6-overlay-binary.tar.xz + +# Configure environment +ENV CONDA_DIR=/opt/conda \ + SHELL=/bin/bash \ + SYSTEM_USER="${SYSTEM_USER}" \ + SYSTEM_UID=${SYSTEM_UID} \ + SYSTEM_GID=${SYSTEM_GID} \ + LC_ALL=en_US.UTF-8 \ + LANG=en_US.UTF-8 \ + LANGUAGE=en_US.UTF-8 +ENV PATH="${CONDA_DIR}/bin:${PATH}" \ + HOME="/home/${SYSTEM_USER}" + + +# Copy a script that we will use to correct permissions after running certain commands +COPY fix-permissions /usr/local/bin/fix-permissions +RUN chmod a+rx /usr/local/bin/fix-permissions + +# Enable prompt color in the skeleton .bashrc before creating the default SYSTEM_USER +# hadolint ignore=SC2016 +RUN sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc && \ + # Add call to conda init script see https://stackoverflow.com/a/58081608/4413446 + echo 'eval "$(command conda shell.bash hook 2> /dev/null)"' >> /etc/skel/.bashrc + +# Create SYSTEM_USER with name jovyan user with UID=1000 and in the 'users' group +# and make sure these dirs are writable by the `users` group. +RUN echo "auth requisite pam_deny.so" >> /etc/pam.d/su && \ + sed -i.bak -e 's/^%admin/#%admin/' /etc/sudoers && \ + sed -i.bak -e 's/^%sudo/#%sudo/' /etc/sudoers && \ + useradd -l -m -s /bin/bash -N -u "${SYSTEM_UID}" "${SYSTEM_USER}" && \ + mkdir -p "${CONDA_DIR}" && \ + chown "${SYSTEM_USER}:${SYSTEM_GID}" "${CONDA_DIR}" && \ + chmod g+w /etc/passwd && \ + fix-permissions "${HOME}" && \ + fix-permissions "${CONDA_DIR}" + +USER ${SYSTEM_UID} + +# Pin python version here +ARG PYTHON_VERSION + +# Download and install Micromamba, and initialize Conda prefix. 
+# +# Similar projects using Micromamba: +# - Micromamba-Docker: +# - repo2docker: +# Install Python, Mamba and jupyter_core +# Cleanup temporary files and remove Micromamba +# Correct permissions +# Do all this in a single RUN command to avoid duplicating all of the +# files across image layers when the permissions change +COPY --chown="${SYSTEM_UID}:${SYSTEM_GID}" initial-condarc "${CONDA_DIR}/.condarc" +WORKDIR /tmp +RUN set -x && \ + arch=$(uname -m) && \ + if [ "${arch}" = "x86_64" ]; then \ + # Should be simpler, see + arch="64"; \ + fi && \ + wget --progress=dot:giga -O /tmp/micromamba.tar.bz2 \ + "https://micromamba.snakepit.net/api/micromamba/linux-${arch}/latest" && \ + tar -xvjf /tmp/micromamba.tar.bz2 --strip-components=1 bin/micromamba && \ + rm /tmp/micromamba.tar.bz2 && \ + PYTHON_SPECIFIER="python=${PYTHON_VERSION}" && \ + if [[ "${PYTHON_VERSION}" == "default" ]]; then PYTHON_SPECIFIER="python"; fi && \ + # Install the packages + ./micromamba install \ + --root-prefix="${CONDA_DIR}" \ + --prefix="${CONDA_DIR}" \ + --yes \ + "${PYTHON_SPECIFIER}" \ + 'mamba' && \ + rm micromamba && \ + # Pin major.minor version of python + mamba list python | grep '^python ' | tr -s ' ' | cut -d ' ' -f 1,2 >> "${CONDA_DIR}/conda-meta/pinned" && \ + mamba clean --all -f -y && \ + fix-permissions "${CONDA_DIR}" && \ + fix-permissions "/home/${SYSTEM_USER}" + +# Add ~/.local/bin to PATH where the dependencies get installed via pip +# This require the package installed with `--user` flag in pip +ENV PATH=${PATH}:/home/${NB_USER}/.local/bin + +# Switch to root to install AiiDA and set AiiDA as service +# Install AiiDA from source code +USER root +COPY --from=src . /tmp/aiida-core +RUN pip install /tmp/aiida-core --no-cache-dir && \ + rm -rf /tmp/aiida-core + +# Enable verdi autocompletion. +RUN mkdir -p "${CONDA_DIR}/etc/conda/activate.d" && \ + echo 'eval "$(_VERDI_COMPLETE=bash_source verdi)"' >> "${CONDA_DIR}/etc/conda/activate.d/activate_aiida_autocompletion.sh" && \ + chmod +x "${CONDA_DIR}/etc/conda/activate.d/activate_aiida_autocompletion.sh" && \ + fix-permissions "${CONDA_DIR}" + +# COPY AiiDA profile configuration for profile setup init script +COPY --chown="${SYSTEM_UID}:${SYSTEM_GID}" s6-assets/config-quick-setup.yaml "/aiida/assets/config-quick-setup.yaml" +COPY s6-assets/s6-rc.d /etc/s6-overlay/s6-rc.d +COPY s6-assets/init /etc/init + +# Otherwise will stuck on oneshot services +# https://github.com/just-containers/s6-overlay/issues/467 +ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 + +# Switch back to USER aiida to avoid accidental container runs as root +USER ${SYSTEM_UID} + +ENTRYPOINT ["/init"] + +WORKDIR "${HOME}" diff --git a/.docker/aiida-core-base/fix-permissions b/.docker/aiida-core-base/fix-permissions new file mode 100644 index 0000000000..840173c605 --- /dev/null +++ b/.docker/aiida-core-base/fix-permissions @@ -0,0 +1,35 @@ +#!/bin/bash +# This is brought from jupyter docker-stacks: +# https://github.com/jupyter/docker-stacks/blob/main/docker-stacks-foundation/fix-permissions +# set permissions on a directory +# after any installation, if a directory needs to be (human) user-writable, +# run this script on it. +# It will make everything in the directory owned by the group ${SYSTEM_GID} +# and writable by that group. 
+ +# uses find to avoid touching files that already have the right permissions, +# which would cause massive image explosion + +# right permissions are: +# group=${SYSEM_GID} +# AND permissions include group rwX (directory-execute) +# AND directories have setuid,setgid bits set + +set -e + +for d in "$@"; do + find "${d}" \ + ! \( \ + -group "${SYSTEM_GID}" \ + -a -perm -g+rwX \ + \) \ + -exec chgrp "${SYSTEM_GID}" -- {} \+ \ + -exec chmod g+rwX -- {} \+ + # setuid, setgid *on directories only* + find "${d}" \ + \( \ + -type d \ + -a ! -perm -6000 \ + \) \ + -exec chmod +6000 -- {} \+ +done diff --git a/.docker/aiida-core-base/initial-condarc b/.docker/aiida-core-base/initial-condarc new file mode 100644 index 0000000000..383aad3cb0 --- /dev/null +++ b/.docker/aiida-core-base/initial-condarc @@ -0,0 +1,6 @@ +# Conda configuration see https://conda.io/projects/conda/en/latest/configuration.html + +auto_update_conda: false +show_channel_urls: true +channels: + - conda-forge diff --git a/.docker/aiida-core-base/s6-assets/config-quick-setup.yaml b/.docker/aiida-core-base/s6-assets/config-quick-setup.yaml new file mode 100644 index 0000000000..f910069e1d --- /dev/null +++ b/.docker/aiida-core-base/s6-assets/config-quick-setup.yaml @@ -0,0 +1,15 @@ +--- +db_engine: postgresql_psycopg2 +db_backend: core.psql_dos +db_host: database +db_port: 5432 +su_db_username: postgres +su_db_password: password +su_db_name: template1 +db_name: aiida_db +db_username: aiida +db_password: password +broker_host: messaging +broker_port: 5672 +broker_username: guest +broker_password: guest diff --git a/.docker/opt/configure-aiida.sh b/.docker/aiida-core-base/s6-assets/init/aiida-prepare.sh similarity index 56% rename from .docker/opt/configure-aiida.sh rename to .docker/aiida-core-base/s6-assets/init/aiida-prepare.sh index 92aa3ab45d..690ebff536 100755 --- a/.docker/opt/configure-aiida.sh +++ b/.docker/aiida-core-base/s6-assets/init/aiida-prepare.sh @@ -2,17 +2,20 @@ # This script is executed whenever the docker container is (re)started. -# Debugging. -set -x - # Environment. export SHELL=/bin/bash -# Setup AiiDA autocompletion. -grep _VERDI_COMPLETE /home/${SYSTEM_USER}/.bashrc &> /dev/null || echo 'eval "$(_VERDI_COMPLETE=source verdi)"' >> /home/${SYSTEM_USER}/.bashrc +# Configure AiiDA. +export SETUP_DEFAULT_AIIDA_PROFILE=true +export AIIDA_PROFILE_NAME=default +export AIIDA_USER_EMAIL=aiida@localhost +export AIIDA_USER_FIRST_NAME=Giuseppe +export AIIDA_USER_LAST_NAME=Verdi +export AIIDA_USER_INSTITUTION=Khedivial +export AIIDA_PROFILE_PATH=/aiida/assets/config-quick-setup.yaml # Check if user requested to set up AiiDA profile (and if it exists already) -if [[ ${SETUP_DEFAULT_PROFILE} == true ]] && ! verdi profile show ${PROFILE_NAME} &> /dev/null; then +if [[ ${SETUP_DEFAULT_AIIDA_PROFILE} == true ]] && ! verdi profile show ${AIIDA_PROFILE_NAME} &> /dev/null; then NEED_SETUP_PROFILE=true; else NEED_SETUP_PROFILE=false; @@ -22,15 +25,23 @@ fi if [[ ${NEED_SETUP_PROFILE} == true ]]; then # Create AiiDA profile. 
- verdi quicksetup \ - --non-interactive \ - --profile "${PROFILE_NAME}" \ - --email "${USER_EMAIL}" \ - --first-name "${USER_FIRST_NAME}" \ - --last-name "${USER_LAST_NAME}" \ - --institution "${USER_INSTITUTION}" \ - --db-host "${DB_HOST:localhost}" \ - --broker-host "${BROKER_HOST:localhost}" + verdi quicksetup \ + --non-interactive \ + --profile "${AIIDA_PROFILE_NAME}" \ + --email "${AIIDA_USER_EMAIL}" \ + --first-name "${AIIDA_USER_FIRST_NAME}" \ + --last-name "${AIIDA_USER_LAST_NAME}" \ + --institution "${AIIDA_USER_INSTITUTION}" \ + --config "${AIIDA_PROFILE_PATH}" + + # Supress verdi version warning because we are using a development version + verdi config set warnings.development_version False + + # Supress rabbitmq version warning + # If it is built using RMQ version > 3.8.15 (as we did for the `aiida-core` image) which has the issue as described in + # https://github.com/aiidateam/aiida-core/wiki/RabbitMQ-version-to-use + # We explicitly set consumer_timeout to 100 hours in /etc/rabbitmq/rabbitmq.conf + verdi config set warnings.rabbitmq_version False # Setup and configure local computer. computer_name=localhost @@ -52,18 +63,18 @@ if [[ ${NEED_SETUP_PROFILE} == true ]]; then exit 1 fi - verdi computer show ${computer_name} || verdi computer setup \ - --non-interactive \ - --label "${computer_name}" \ - --description "this computer" \ - --hostname "${computer_name}" \ + verdi computer show ${computer_name} &> /dev/null || verdi computer setup \ + --non-interactive \ + --label "${computer_name}" \ + --description "container computer" \ + --hostname "${computer_name}" \ --transport core.local \ - --scheduler core.direct \ - --work-dir /home/aiida/aiida_run/ \ - --mpirun-command "mpirun -np {tot_num_mpiprocs}" \ - --mpiprocs-per-machine ${LOCALHOST_MPI_PROCS_PER_MACHINE} && \ - verdi computer configure core.local "${computer_name}" \ - --non-interactive \ + --scheduler core.direct \ + --work-dir /home/${SYSTEM_USER}/aiida_run/ \ + --mpirun-command "mpirun -np {tot_num_mpiprocs}" \ + --mpiprocs-per-machine ${LOCALHOST_MPI_PROCS_PER_MACHINE} && \ + verdi computer configure core.local "${computer_name}" \ + --non-interactive \ --safe-interval 0.0 fi @@ -71,20 +82,5 @@ fi # Show the default profile verdi profile show || echo "The default profile is not set." -# Make sure that the daemon is not running, otherwise the migration will abort. -verdi daemon stop - # Migration will run for the default profile. -verdi storage migrate --force || echo "Database migration failed." - -# Supress rabbitmq version warning for arm64 since -# the it build using latest version rabbitmq from apt install -# We explicitly set consumer_timeout to 100 hours in /etc/rabbitmq/rabbitmq.conf -export ARCH=`uname -m` -if [ "$ARCH" = "aarch64" ]; then \ - verdi config set warnings.rabbitmq_version False -fi - - -# Daemon will start only if the database exists and is migrated to the latest version. -verdi daemon start || echo "AiiDA daemon is not running." 
+verdi storage migrate --force diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/aiida-prepare b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/aiida-prepare new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/base b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/down b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/down new file mode 100644 index 0000000000..b8a14495ad --- /dev/null +++ b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/down @@ -0,0 +1 @@ +verdi daemon stop diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/timeout-up b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/timeout-up new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/timeout-up @@ -0,0 +1 @@ +0 diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/type b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/type new file mode 100644 index 0000000000..bdd22a1850 --- /dev/null +++ b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/type @@ -0,0 +1 @@ +oneshot diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/up b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/up new file mode 100644 index 0000000000..12f199a2b3 --- /dev/null +++ b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/up @@ -0,0 +1,3 @@ +#!/command/execlineb -S0 + +verdi daemon start diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/base b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/timeout-up b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/timeout-up new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/timeout-up @@ -0,0 +1 @@ +0 diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/type b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/type new file mode 100644 index 0000000000..bdd22a1850 --- /dev/null +++ b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/type @@ -0,0 +1 @@ +oneshot diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/up b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/up new file mode 100644 index 0000000000..60e82d7e43 --- /dev/null +++ b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/up @@ -0,0 +1,4 @@ +#!/command/execlineb -S0 + +foreground { s6-echo "Calling /etc/init/aiida-prepare" } +/etc/init/aiida-prepare.sh diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/user/contents.d/aiida-daemon-start b/.docker/aiida-core-base/s6-assets/s6-rc.d/user/contents.d/aiida-daemon-start new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/user/contents.d/aiida-prepare b/.docker/aiida-core-base/s6-assets/s6-rc.d/user/contents.d/aiida-prepare new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/Dockerfile b/.docker/aiida-core-with-services/Dockerfile new file mode 100644 index 
0000000000..896645d549 --- /dev/null +++ b/.docker/aiida-core-with-services/Dockerfile @@ -0,0 +1,42 @@ +# syntax=docker/dockerfile:1 +FROM aiida-core-base + +LABEL maintainer="AiiDA Team " + +USER root +WORKDIR /opt/ + +ARG PGSQL_VERSION +ARG RMQ_VERSION + +ENV PGSQL_VERSION=${PGSQL_VERSION} +ENV RMQ_VERSION=${RMQ_VERSION} + +RUN mamba install --yes \ + --channel conda-forge \ + postgresql=${PGSQL_VERSION} && \ + mamba clean --all -f -y && \ + fix-permissions "${CONDA_DIR}" && \ + fix-permissions "/home/${SYSTEM_USER}" + +# Install erlang. +RUN apt-get update --yes && \ + apt-get install --yes --no-install-recommends \ + erlang \ + xz-utils && \ + apt-get clean && rm -rf /var/lib/apt/lists/* && \ + # Install rabbitmq. + wget -c --no-check-certificate https://github.com/rabbitmq/rabbitmq-server/releases/download/v${RMQ_VERSION}/rabbitmq-server-generic-unix-${RMQ_VERSION}.tar.xz && \ + tar -xf rabbitmq-server-generic-unix-${RMQ_VERSION}.tar.xz && \ + rm rabbitmq-server-generic-unix-${RMQ_VERSION}.tar.xz && \ + ln -sf /opt/rabbitmq_server-${RMQ_VERSION}/sbin/* /usr/local/bin/ && \ + fix-permissions /opt/rabbitmq_server-${RMQ_VERSION} + +# s6-overlay to start services +COPY --chown="${SYSTEM_UID}:${SYSTEM_GID}" s6-assets/config-quick-setup.yaml "/aiida/assets/config-quick-setup.yaml" +COPY s6-assets/s6-rc.d /etc/s6-overlay/s6-rc.d +COPY s6-assets/init /etc/init + +USER ${SYSTEM_UID} + +WORKDIR "/home/${SYSTEM_USER}" diff --git a/.docker/aiida-core-with-services/s6-assets/config-quick-setup.yaml b/.docker/aiida-core-with-services/s6-assets/config-quick-setup.yaml new file mode 100644 index 0000000000..24c516270d --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/config-quick-setup.yaml @@ -0,0 +1,3 @@ +--- +db_name: aiida_db +db_username: aiida diff --git a/.docker/aiida-core-with-services/s6-assets/init/postgresql-init.sh b/.docker/aiida-core-with-services/s6-assets/init/postgresql-init.sh new file mode 100755 index 0000000000..0d3556f453 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/init/postgresql-init.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# make DB directory, if not existent +if [ ! -d /home/${SYSTEM_USER}/.postgresql ]; then + mkdir /home/${SYSTEM_USER}/.postgresql + initdb -D /home/${SYSTEM_USER}/.postgresql + echo "unix_socket_directories = '/tmp'" >> /home/${SYSTEM_USER}/.postgresql/postgresql.conf +fi + +PSQL_STATUS_CMD="pg_ctl -D /home/${SYSTEM_USER}/.postgresql status" + +# Fix problem with kubernetes cluster that adds rws permissions to the group +# for more details see: https://github.com/materialscloud-org/aiidalab-z2jh-eosc/issues/5 +chmod g-rwxs /home/${SYSTEM_USER}/.postgresql -R + +# stores return value in $? +running=true +${PSQL_STATUS_CMD} > /dev/null 2>&1 || running=false + +# Postgresql was probably not shutdown properly. Cleaning up the mess... +if ! $running ; then + echo "" > /home/${SYSTEM_USER}/.postgresql/logfile # empty log files + rm -vf /home/${SYSTEM_USER}/.postgresql/postmaster.pid +fi diff --git a/.docker/aiida-core-with-services/s6-assets/init/postgresql-prepare.sh b/.docker/aiida-core-with-services/s6-assets/init/postgresql-prepare.sh new file mode 100755 index 0000000000..580ee47106 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/init/postgresql-prepare.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +PG_ISREADY=1 +while [ "$PG_ISREADY" != "0" ]; do + sleep 1 + pg_isready --quiet + PG_ISREADY=$? 
+done diff --git a/.docker/aiida-core-with-services/s6-assets/init/rabbitmq-init.sh b/.docker/aiida-core-with-services/s6-assets/init/rabbitmq-init.sh new file mode 100755 index 0000000000..f4c6a0766f --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/init/rabbitmq-init.sh @@ -0,0 +1,25 @@ +#!/bin/bash +RABBITMQ_DATA_DIR="/home/${SYSTEM_USER}/.rabbitmq" + +mkdir -p "${RABBITMQ_DATA_DIR}" +fix-permissions "${RABBITMQ_DATA_DIR}" + +# Fix issue where the erlang cookie permissions are corrupted. +chmod 400 "/home/${SYSTEM_USER}/.erlang.cookie" || echo "erlang cookie not created yet." + +# Set base directory for RabbitMQ to persist its data. This needs to be set to a folder in the system user's home +# directory as that is the only folder that is persisted outside of the container. +RMQ_ETC_DIR="/opt/rabbitmq_server-${RMQ_VERSION}/etc/rabbitmq" +echo MNESIA_BASE="${RABBITMQ_DATA_DIR}" >> "${RMQ_ETC_DIR}/rabbitmq-env.conf" +echo LOG_BASE="${RABBITMQ_DATA_DIR}/log" >> "${RMQ_ETC_DIR}/rabbitmq-env.conf" + +# using workaround from https://github.com/aiidateam/aiida-core/wiki/RabbitMQ-version-to-use +# set timeout to 100 hours +echo "consumer_timeout=3600000" >> "${RMQ_ETC_DIR}/rabbitmq.conf" + +# Explicitly define the node name. This is necessary because the mnesia subdirectory contains the hostname, which by +# default is set to the value of $(hostname -s), which, for docker containers, will be a random hexadecimal string. Upon +# restart, this will be different and so the original mnesia folder with the persisted data will not be found. RabbitMQ +# is designed this way to allow running multiple nodes on a single machine, each with an isolated mnesia directory. +# Since in the AiiDA setup we only need a single node, we can simply use localhost.
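The effect of the appends above, together with the node name fixed just below, is easiest to check from inside a running `aiida-core-with-services` container; a minimal sketch, assuming the paths used in this script and that the s6 `rabbitmq` service is already up:

```bash
# Show the environment file assembled by this init script
# (RMQ_VERSION is exported as an ENV variable in the image).
cat "/opt/rabbitmq_server-${RMQ_VERSION}/etc/rabbitmq/rabbitmq-env.conf"

# The broker should report the fixed node name rabbit@localhost
# rather than a hostname-derived one.
rabbitmq-diagnostics -n rabbit@localhost status | head -n 20
```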
+echo NODENAME=rabbit@localhost >> "${RMQ_ETC_DIR}/rabbitmq-env.conf" diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/base b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/postgresql b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/postgresql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/postgresql-prepare b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/aiida-prepare/dependencies.d/postgresql-prepare new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/dependencies.d/base b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/timeout-up b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/timeout-up new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/timeout-up @@ -0,0 +1 @@ +0 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/type b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/type new file mode 100644 index 0000000000..bdd22a1850 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/type @@ -0,0 +1 @@ +oneshot diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/up b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/up new file mode 100644 index 0000000000..6fc0f06f57 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-init/up @@ -0,0 +1,6 @@ +#!/command/execlineb -S0 + +with-contenv + +foreground { s6-echo "Calling /etc/init/postgresql-init" } +/etc/init/postgresql-init.sh diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/dependencies.d/base b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/timeout-up b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/timeout-up new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/timeout-up @@ -0,0 +1 @@ +0 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/type b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/type new file mode 100644 index 0000000000..bdd22a1850 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/type @@ -0,0 +1 @@ +oneshot diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/up b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/up new file mode 100644 index 0000000000..df5f5f83f9 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql-prepare/up @@ -0,0 +1,6 @@ +#!/command/execlineb -S0 + +with-contenv + +foreground { s6-echo "Calling /etc/init/postgresql-prepare" } +/etc/init/postgresql-prepare.sh diff --git 
a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/dependencies.d/base b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/dependencies.d/postgresql-init b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/dependencies.d/postgresql-init new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/down b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/down new file mode 100644 index 0000000000..f2cc3c69b8 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/down @@ -0,0 +1 @@ +pg_ctl -D /home/aiida/.postgresql stop diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/timeout-up b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/timeout-up new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/timeout-up @@ -0,0 +1 @@ +0 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/type b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/type new file mode 100644 index 0000000000..bdd22a1850 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/type @@ -0,0 +1 @@ +oneshot diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/up b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/up new file mode 100644 index 0000000000..776d110d6c --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/postgresql/up @@ -0,0 +1,5 @@ +#!/command/execlineb -P + +with-contenv + +pg_ctl -D /home/aiida/.postgresql -l /home/${SYSTEM_USER}/.postgresql/logfile start diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/dependencies.d/base b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/timeout-up b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/timeout-up new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/timeout-up @@ -0,0 +1 @@ +0 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/type b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/type new file mode 100644 index 0000000000..bdd22a1850 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/type @@ -0,0 +1 @@ +oneshot diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/up b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/up new file mode 100644 index 0000000000..e574020053 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq-init/up @@ -0,0 +1,5 @@ +#!/command/execlineb -S0 +with-contenv + +foreground { s6-echo "Calling /etc/init/rabbitmq-init.sh" } +/etc/init/rabbitmq-init.sh diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/dependencies.d/base b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/dependencies.d/rabbitmq-init 
b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/dependencies.d/rabbitmq-init new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/down-signal b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/down-signal new file mode 100644 index 0000000000..d751378e19 --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/down-signal @@ -0,0 +1 @@ +SIGINT diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/run b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/run new file mode 100644 index 0000000000..e5752294ff --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/run @@ -0,0 +1,6 @@ +#!/command/execlineb -P + +with-contenv + +foreground { s6-echo "Calling /etc/init/rabbitmq.sh" } +rabbitmq-server diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/type b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/type new file mode 100644 index 0000000000..5883cff0cd --- /dev/null +++ b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/rabbitmq/type @@ -0,0 +1 @@ +longrun diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/aiida-prepare b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/aiida-prepare new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql-init b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql-init new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql-prepare b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/postgresql-prepare new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/rabbitmq b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/rabbitmq new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/rabbitmq-init b/.docker/aiida-core-with-services/s6-assets/s6-rc.d/user/contents.d/rabbitmq-init new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/build.json b/.docker/build.json new file mode 100644 index 0000000000..554a4d95c7 --- /dev/null +++ b/.docker/build.json @@ -0,0 +1,13 @@ +{ + "variable": { + "PYTHON_VERSION": { + "default": "3.9.13" + }, + "PGSQL_VERSION": { + "default": "15" + }, + "RMQ_VERSION": { + "default": "3.10.18" + } + } + } diff --git a/.docker/docker-bake.hcl b/.docker/docker-bake.hcl new file mode 100644 index 0000000000..66f7bf5603 --- /dev/null +++ b/.docker/docker-bake.hcl @@ -0,0 +1,67 @@ +# docker-bake.hcl +variable "VERSION" { +} + +variable "PYTHON_VERSION" { +} + +variable "PGSQL_VERSION" { +} + +variable "ORGANIZATION" { + default = "aiidateam" +} + +variable "REGISTRY" { + default = "docker.io/" +} + +variable "PLATFORMS" { + default = ["linux/amd64"] +} + +variable "TARGETS" { + default = ["aiida-core-base", "aiida-core-with-services"] +} + +function "tags" { + params = [image] + result = [ + "${REGISTRY}${ORGANIZATION}/${image}:newly-baked" + ] +} + +group "default" { + targets = 
"${TARGETS}" +} + +target "aiida-core-base-meta" { + tags = tags("aiida-core-base") +} +target "aiida-core-with-services-meta" { + tags = tags("aiida-core-with-services") +} + +target "aiida-core-base" { + inherits = ["aiida-core-base-meta"] + context = "aiida-core-base" + contexts = { + src = ".." + } + platforms = "${PLATFORMS}" + args = { + "PYTHON_VERSION" = "${PYTHON_VERSION}" + } +} +target "aiida-core-with-services" { + inherits = ["aiida-core-with-services-meta"] + context = "aiida-core-with-services" + contexts = { + aiida-core-base = "target:aiida-core-base" + } + platforms = "${PLATFORMS}" + args = { + "PGSQL_VERSION" = "${PGSQL_VERSION}" + "RMQ_VERSION" = "${RMQ_VERSION}" + } +} diff --git a/.docker/docker-compose.aiida-core-base.yml b/.docker/docker-compose.aiida-core-base.yml new file mode 100644 index 0000000000..a3943fd089 --- /dev/null +++ b/.docker/docker-compose.aiida-core-base.yml @@ -0,0 +1,49 @@ +--- +version: '3.4' + +services: + + database: + image: postgres:15 + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: password + # volumes: + # - aiida-postgres-db:/var/lib/postgresql/data + healthcheck: + test: [ "CMD-SHELL", "pg_isready"] + interval: 5s + timeout: 5s + retries: 10 + + messaging: + image: rabbitmq:3.8.14-management + environment: + RABBITMQ_DEFAULT_USER: guest + RABBITMQ_DEFAULT_PASS: guest + # volumes: + # - aiida-rmq-data:/var/lib/rabbitmq/ + healthcheck: + test: rabbitmq-diagnostics check_port_connectivity + interval: 30s + timeout: 30s + retries: 10 + + aiida: + image: ${REGISTRY:-}${BASE_IMAGE:-aiidateam/aiida-core-base}:${TAG:-latest} + environment: + RMQHOST: messaging + TZ: Europe/Zurich + SETUP_DEFAULT_AIIDA_PROFILE: 'true' + # volumes: + # - aiida-home-folder:/home/aiida + depends_on: + database: + condition: service_healthy + #messaging: + # condition: service_healthy + +#volumes: +# aiida-postgres-db: +# aiida-rmq-data: +# aiida-home-folder: diff --git a/.docker/docker-compose.aiida-core-with-services.yml b/.docker/docker-compose.aiida-core-with-services.yml new file mode 100644 index 0000000000..cdc0253f7f --- /dev/null +++ b/.docker/docker-compose.aiida-core-with-services.yml @@ -0,0 +1,15 @@ +--- +version: '3.4' + +services: + + aiida: + image: ${REGISTRY:-}${BASE_IMAGE:-aiidateam/aiida-core-with-services}:${TAG:-latest} + environment: + TZ: Europe/Zurich + SETUP_DEFAULT_AIIDA_PROFILE: 'true' + #volumes: + # - aiida-home-folder:/home/aiida + +volumes: + aiida-home-folder: diff --git a/.docker/docker-rabbitmq.yml b/.docker/docker-rabbitmq.yml deleted file mode 100644 index da266790ff..0000000000 --- a/.docker/docker-rabbitmq.yml +++ /dev/null @@ -1,34 +0,0 @@ -# A small configuration for use in local CI testing, -# if you wish to control the rabbitmq used. 
- -# Simply install docker, then run: -# $ docker-compose -f .docker/docker-rabbitmq.yml up -d - -# and to power down, after testing: -# $ docker-compose -f .docker/docker-rabbitmq.yml down - -# you can monitor rabbitmq use at: http://localhost:15672 - -version: '3.4' - -services: - - rabbit: - image: rabbitmq:3.8.3-management - container_name: aiida-rmq - environment: - RABBITMQ_DEFAULT_USER: guest - RABBITMQ_DEFAULT_PASS: guest - ports: - - '5672:5672' - - '15672:15672' - healthcheck: - test: rabbitmq-diagnostics -q ping - interval: 30s - timeout: 30s - retries: 5 - networks: - - aiida-rmq - -networks: - aiida-rmq: diff --git a/.docker/my_init.d/configure-aiida.sh b/.docker/my_init.d/configure-aiida.sh deleted file mode 100755 index 7ac4476b07..0000000000 --- a/.docker/my_init.d/configure-aiida.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -set -em - -su -c /opt/configure-aiida.sh ${SYSTEM_USER} diff --git a/.docker/pytest.ini b/.docker/pytest.ini new file mode 100644 index 0000000000..d1e7877377 --- /dev/null +++ b/.docker/pytest.ini @@ -0,0 +1,5 @@ +[pytest] +minversion = 7.0 +addopts = -ra -q +testpaths = + tests diff --git a/.docker/requirements.txt b/.docker/requirements.txt new file mode 100644 index 0000000000..3ba15482bb --- /dev/null +++ b/.docker/requirements.txt @@ -0,0 +1,8 @@ +docker +pre-commit +pytest +requests +tabulate +pytest-docker +docker-compose +pyyaml<=5.3.1 diff --git a/.docker/tests/conftest.py b/.docker/tests/conftest.py new file mode 100644 index 0000000000..2d222353ac --- /dev/null +++ b/.docker/tests/conftest.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# pylint: disable=missing-docstring, redefined-outer-name +import json +from pathlib import Path + +import pytest + + +@pytest.fixture(scope='session', params=['aiida-core-base', 'aiida-core-with-services']) +def variant(request): + return request.param + + +@pytest.fixture(scope='session') +def docker_compose_file(pytestconfig, variant): # pylint: disable=unused-argument + return f'docker-compose.{variant}.yml' + + +@pytest.fixture(scope='session') +def docker_compose(docker_services): + # pylint: disable=protected-access + return docker_services._docker_compose + + +@pytest.fixture +def timeout(): + """Container and service startup timeout""" + return 30 + + +@pytest.fixture +def container_user(): + return 'aiida' + + +@pytest.fixture +def aiida_exec(docker_compose): + + def execute(command, user=None, **kwargs): + if user: + command = f'exec -T --user={user} aiida {command}' + else: + command = f'exec -T aiida {command}' + return docker_compose.execute(command, **kwargs) + + return execute + + +@pytest.fixture(scope='session') +def _build_config(): + return json.loads(Path('build.json').read_text(encoding='utf-8'))['variable'] + + +@pytest.fixture(scope='session') +def python_version(_build_config): + return _build_config['PYTHON_VERSION']['default'] + + +@pytest.fixture(scope='session') +def pgsql_version(_build_config): + return _build_config['PGSQL_VERSION']['default'] diff --git a/.docker/tests/test_aiida.py b/.docker/tests/test_aiida.py new file mode 100644 index 0000000000..803bc855d7 --- /dev/null +++ b/.docker/tests/test_aiida.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# pylint: disable=missing-docstring +import json +import time + +from packaging.version import parse +import pytest + + +def test_correct_python_version_installed(aiida_exec, python_version): + info = json.loads(aiida_exec('mamba list --json --full-name python').decode())[0] + assert info['name'] == 'python' + assert 
parse(info['version']) == parse(python_version) + + +def test_correct_pgsql_version_installed(aiida_exec, pgsql_version, variant): + if variant == 'aiida-core-base': + pytest.skip('PostgreSQL is not installed in the base image') + + info = json.loads(aiida_exec('mamba list --json --full-name postgresql').decode())[0] + assert info['name'] == 'postgresql' + assert parse(info['version']).major == parse(pgsql_version).major + + +def test_verdi_status(aiida_exec, container_user, timeout): + time.sleep(timeout) + output = aiida_exec('verdi status', user=container_user).decode().strip() + assert 'Connected to RabbitMQ' in output + assert 'Daemon is running' in output + + # check that we have suppressed the warnings + assert 'Warning' not in output diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index dfe06bad59..0000000000 --- a/.dockerignore +++ /dev/null @@ -1,13 +0,0 @@ -.benchmarks -.cache -.coverage -.mypy_cache -.pytest_cache -.tox -.vscode -aiida_core.egg-info -docs/build -pip-wheel-metadata -**/.DS_Store -**/*.pyc -**/__pycache__ diff --git a/.github/actions/create-dev-env/action.yml b/.github/actions/create-dev-env/action.yml new file mode 100644 index 0000000000..1142844757 --- /dev/null +++ b/.github/actions/create-dev-env/action.yml @@ -0,0 +1,27 @@ +--- +name: Build environment +description: Create build environment + +inputs: + architecture: + description: architecture to be run on + required: true + type: string + +runs: + using: composite + steps: + # actions/setup-python doesn't support Linux arm64 runners + # See: https://github.com/actions/setup-python/issues/108 + # python3 is manually preinstalled in the arm64 VM self-hosted runner + - name: Set Up Python ๐Ÿ + uses: actions/setup-python@v4 + with: + python-version: 3.x + if: ${{ inputs.architecture == 'amd64' }} + + - name: Install Dev Dependencies ๐Ÿ“ฆ + run: | + pip install --upgrade pip + pip install --upgrade -r .docker/requirements.txt + shell: bash diff --git a/.github/actions/load-image/action.yml b/.github/actions/load-image/action.yml new file mode 100644 index 0000000000..5909cdd518 --- /dev/null +++ b/.github/actions/load-image/action.yml @@ -0,0 +1,31 @@ +--- +name: Load Docker image +description: Download image tar and load it to docker + +inputs: + image: + description: Image name + required: true + type: string + architecture: + description: Image architecture + required: true + type: string + +runs: + using: composite + steps: + - name: Download built image ๐Ÿ“ฅ + uses: actions/download-artifact@v3 + with: + name: ${{ inputs.image }}-${{ inputs.architecture }} + path: /tmp/ + - name: Load downloaded image to docker ๐Ÿ“ฅ + run: | + docker load --input /tmp/${{ inputs.image }}-${{ inputs.architecture }}.tar + docker image ls --all + shell: bash + - name: Delete the file ๐Ÿ—‘๏ธ + run: rm -f /tmp/${{ inputs.image }}-${{ inputs.architecture }}.tar + shell: bash + if: always() diff --git a/.github/workflows/build_and_test_docker_on_pr.yml b/.github/workflows/build_and_test_docker_on_pr.yml deleted file mode 100644 index 0a318c059b..0000000000 --- a/.github/workflows/build_and_test_docker_on_pr.yml +++ /dev/null @@ -1,59 +0,0 @@ -# Test the Docker image on every pull request. -# -# The steps are: -# 1. Build docker image using cached data. -# 2. Start the docker container. -# 3. Check that AiiDA is responsive. 
- -name: build-and-test-image-from-pull-request - -on: - pull_request: - path_ignore: - - 'docs/**' - -jobs: - - build-and-test: - - # Only run this job on the main repository and not on forks - if: github.repository == 'aiidateam/aiida-core' - - runs-on: ubuntu-latest - timeout-minutes: 30 - - steps: - - - uses: actions/checkout@v2 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: Cache Docker layers - uses: actions/cache@v2 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- - - - name: Build image locally - uses: docker/build-push-action@v2 - with: - load: true - push: false - tags: aiida-core:latest - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - - name: Start and test the container - run: | - export DOCKERID=`docker run -d aiida-core:latest` - docker exec --tty $DOCKERID wait-for-services - docker logs $DOCKERID - docker exec --tty --user aiida $DOCKERID /bin/bash -l -c 'verdi profile show default' - docker exec --tty --user aiida $DOCKERID /bin/bash -l -c 'verdi computer show localhost' - docker exec --tty --user aiida $DOCKERID /bin/bash -l -c 'verdi daemon status' diff --git a/.github/workflows/docker-build-test-upload.yml b/.github/workflows/docker-build-test-upload.yml new file mode 100644 index 0000000000..b7c433114f --- /dev/null +++ b/.github/workflows/docker-build-test-upload.yml @@ -0,0 +1,64 @@ +--- +name: Build image and then upload the image, tags and manifests to GitHub artifacts + +env: + OWNER: ${{ github.repository_owner }} + +on: + workflow_call: + inputs: + architecture: + description: Image architecture, e.g. 
amd64, arm64 + required: true + type: string + runsOn: + description: GitHub Actions Runner image + required: true + type: string + +jobs: + build-test-upload: + runs-on: ${{ inputs.runsOn }} + defaults: + run: + shell: bash + working-directory: .docker + + steps: + - name: Checkout Repo โšก๏ธ + uses: actions/checkout@v3 + - name: Create dev environment ๐Ÿ“ฆ + uses: ./.github/actions/create-dev-env + with: + architecture: ${{ inputs.architecture }} + + - name: Build image base and base-with-services (output image name aiida-coer ยง) ๐Ÿ›  + # The order of the buildx bake files is important, as the second one will overwrite the first one + run: docker buildx bake -f docker-bake.hcl -f build.json --set *.platform=linux/${{ inputs.architecture }} --load + env: + # Full logs for CI build + BUILDKIT_PROGRESS: plain + + - name: Run tests โœ… + run: TAG=newly-baked python -m pytest -s tests + + - name: Save image as a tar for later use ๐Ÿ’พ + run: | + docker save ${{ env.OWNER }}/aiida-core-base -o /tmp/aiida-core-base-${{ inputs.architecture }}.tar + docker save ${{ env.OWNER }}/aiida-core-with-services -o /tmp/aiida-core-with-services-${{ inputs.architecture }}.tar + + - name: Upload aiida-core-base image as artifact ๐Ÿ’พ + uses: actions/upload-artifact@v3 + with: + name: aiida-core-base-${{ inputs.architecture }} + path: /tmp/aiida-core-base-${{ inputs.architecture }}.tar + retention-days: 3 + if: ${{ !github.event.pull_request.head.repo.fork }} + + - name: Upload aiida-core-with-services image as artifact ๐Ÿ’พ + uses: actions/upload-artifact@v3 + with: + name: aiida-core-with-services-${{ inputs.architecture }} + path: /tmp/aiida-core-with-services-${{ inputs.architecture }}.tar + retention-days: 3 + if: ${{ !github.event.pull_request.head.repo.fork }} diff --git a/.github/workflows/docker-merge-tags.yml b/.github/workflows/docker-merge-tags.yml new file mode 100644 index 0000000000..a3e145b395 --- /dev/null +++ b/.github/workflows/docker-merge-tags.yml @@ -0,0 +1,66 @@ +--- +name: Download images tags from GitHub artifacts and create multi-platform manifests + +on: + workflow_call: + inputs: + registry: + description: Docker registry, e.g. 
ghcr.io, docker.io + required: true + type: string + secrets: + REGISTRY_USERNAME: + required: true + REGISTRY_TOKEN: + required: true + + +jobs: + merge-tags: + runs-on: ubuntu-latest + strategy: + matrix: + image: ["aiida-core-base", "aiida-core-with-services"] + permissions: + packages: write + + steps: + - name: Checkout Repo โšก๏ธ + uses: actions/checkout@v3 + - name: Create dev environment ๐Ÿ“ฆ + uses: ./.github/actions/create-dev-env + with: + architecture: amd64 + + - name: Download amd64 tags file ๐Ÿ“ฅ + uses: actions/download-artifact@v3 + with: + name: ${{ inputs.registry }}-${{ matrix.image }}-amd64-tags + path: /tmp/ + - name: Download arm64 tags file ๐Ÿ“ฅ + uses: actions/download-artifact@v3 + with: + name: ${{ inputs.registry }}-${{ matrix.image }}-arm64-tags + path: /tmp/ + + - name: Login to Container Registry ๐Ÿ”‘ + uses: docker/login-action@v2 + with: + registry: ${{ inputs.registry }} + username: ${{ secrets.REGISTRY_USERNAME }} + password: ${{ secrets.REGISTRY_TOKEN }} + + - name: Merge tags for the images of different arch ๐Ÿ”€ + run: | + for arch_tag in $(cat /tmp/${{ matrix.image }}-amd64-tags.txt); do + tag=$(echo $arch_tag | sed "s/:amd64-/:/") + docker manifest create $tag --amend $arch_tag + docker manifest push $tag + done + + for arch_tag in $(cat /tmp/${{ matrix.image }}-arm64-tags.txt); do + tag=$(echo $arch_tag | sed "s/:arm64-/:/") + docker manifest create $tag --amend $arch_tag + docker manifest push $tag + done + shell: bash diff --git a/.github/workflows/docker-push.yml b/.github/workflows/docker-push.yml new file mode 100644 index 0000000000..cbe50674fa --- /dev/null +++ b/.github/workflows/docker-push.yml @@ -0,0 +1,96 @@ +--- +name: Download Docker image and its tags from GitHub artifacts, apply them and push the image to container registry + +env: + OWNER: ${{ github.repository_owner }} + +on: + workflow_call: + inputs: + architecture: + description: Image architecture + required: true + type: string + registry: + description: Docker registry + required: true + type: string + secrets: + REGISTRY_USERNAME: + required: true + REGISTRY_TOKEN: + required: true + +jobs: + tag-push: + runs-on: ubuntu-latest + strategy: + matrix: + image: ["aiida-core-base", "aiida-core-with-services"] + defaults: + run: + shell: bash + working-directory: .docker + permissions: + packages: write + + steps: + - name: Checkout Repo โšก๏ธ + uses: actions/checkout@v3 + - name: Create dev environment ๐Ÿ“ฆ + uses: ./.github/actions/create-dev-env + with: + architecture: ${{ inputs.architecture }} + - name: Load image to Docker ๐Ÿ“ฅ + uses: ./.github/actions/load-image + with: + image: ${{ matrix.image }} + architecture: ${{ inputs.architecture }} + + - name: Read build variables + id: build_vars + run: | + vars=$(cat build.json | jq -c '[.variable | to_entries[] | {"key": .key, "value": .value.default}] | from_entries') + echo "vars=$vars" >> "${GITHUB_OUTPUT}" + + - name: Docker meta ๐Ÿ“ + id: meta + uses: docker/metadata-action@v4 + env: ${{ fromJson(steps.build_vars.outputs.vars) }} + with: + images: | + name=${{ inputs.registry }}/${{ env.OWNER }}/${{ matrix.image }} + tags: | + type=edge,enable={{is_default_branch}} + type=sha,enable=${{ github.ref_type != 'tag' }} + type=ref,event=pr + type=match,pattern=v(\d+\.\d+.\d+),group=1 + type=raw,value={{tag}},enable=${{ startsWith(github.ref, 'refs/tags/v') }} + type=raw,value=python-${{ env.PYTHON_VERSION }},enable=${{ startsWith(github.ref, 'refs/tags/v') }} + type=raw,value=postgresql-${{ env.PGSQL_VERSION }},enable=${{ 
startsWith(github.ref, 'refs/tags/v') }} + + - name: Login to Container Registry ๐Ÿ”‘ + uses: docker/login-action@v2 + with: + registry: ${{ inputs.registry }} + username: ${{ secrets.REGISTRY_USERNAME }} + password: ${{ secrets.REGISTRY_TOKEN }} + + - name: Set tags for image and push ๐Ÿท๏ธ๐Ÿ“ค๐Ÿ’พ + run: | + declare -a arr=(${{ steps.meta.outputs.tags }}) + for tag in "${arr[@]}"; do + arch_tag=$(echo ${tag} | sed "s/:/:${{ inputs.architecture }}-/") + docker tag ${{ env.OWNER }}/${{ matrix.image }}:newly-baked ${arch_tag} + docker push ${arch_tag} + + # write tag to file + echo ${arch_tag} >> /tmp/${{ matrix.image }}-${{ inputs.architecture }}-tags.txt + done + + - name: Upload tags file ๐Ÿ“ค + uses: actions/upload-artifact@v3 + with: + name: ${{ inputs.registry }}-${{ matrix.image }}-${{ inputs.architecture }}-tags + path: /tmp/${{ matrix.image }}-${{ inputs.architecture }}-tags.txt + retention-days: 3 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 0000000000..c621d9e5d5 --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,101 @@ +--- +name: Build, test and push Docker Images + +on: + pull_request: + paths: + - .docker/** + - .github/workflows/docker-*.yml + push: + branches: + - main + tags: + - "v*" + paths: + - .docker/** + - .github/workflows/docker-*.yml + workflow_dispatch: + +# https://docs.github.com/en/actions/using-jobs/using-concurrency +concurrency: + # only cancel in-progress jobs or runs for the current workflow - matches against branch & tags + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + amd64-build: + uses: ./.github/workflows/docker-build-test-upload.yml + with: + architecture: amd64 + runsOn: ubuntu-latest + + arm64-build: + uses: ./.github/workflows/docker-build-test-upload.yml + with: + architecture: arm64 + runsOn: buildjet-2vcpu-ubuntu-2204-arm + if: ${{ !github.event.pull_request.head.repo.fork }} + + amd64-push-ghcr: + if: github.repository == 'aiidateam/aiida-core' + uses: ./.github/workflows/docker-push.yml + with: + architecture: amd64 + registry: ghcr.io + secrets: + REGISTRY_USERNAME: ${{ github.actor }} + REGISTRY_TOKEN: ${{ secrets.GITHUB_TOKEN }} + needs: [amd64-build] + + arm64-push-ghcr: + uses: ./.github/workflows/docker-push.yml + with: + architecture: arm64 + registry: ghcr.io + secrets: + REGISTRY_USERNAME: ${{ github.actor }} + REGISTRY_TOKEN: ${{ secrets.GITHUB_TOKEN }} + needs: [arm64-build] + if: ${{ !github.event.pull_request.head.repo.fork }} + + merge-tags-ghcr: + uses: ./.github/workflows/docker-merge-tags.yml + with: + registry: ghcr.io + secrets: + REGISTRY_USERNAME: ${{ github.actor }} + REGISTRY_TOKEN: ${{ secrets.GITHUB_TOKEN }} + needs: [amd64-push-ghcr, arm64-push-ghcr] + if: ${{ !github.event.pull_request.head.repo.fork }} + + amd64-push-dockerhub: + if: github.repository == 'aiidateam/aiida-core' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) + uses: ./.github/workflows/docker-push.yml + with: + architecture: amd64 + registry: docker.io + secrets: + REGISTRY_USERNAME: ${{ secrets.DOCKER_USERNAME }} + REGISTRY_TOKEN: ${{ secrets.DOCKER_TOKEN }} + needs: [amd64-build] + + arm64-push-dockerhub: + if: github.repository == 'aiidateam/aiida-core' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) + uses: ./.github/workflows/docker-push.yml + with: + architecture: arm64 + registry: docker.io + secrets: + REGISTRY_USERNAME: ${{ secrets.DOCKER_USERNAME }} + REGISTRY_TOKEN: ${{ 
secrets.DOCKER_TOKEN }} + needs: [arm64-build] + + merge-tags-dockerhub: + if: github.repository == 'aiidateam/aiida-core' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) + uses: ./.github/workflows/docker-merge-tags.yml + with: + registry: docker.io + secrets: + REGISTRY_USERNAME: ${{ secrets.DOCKER_USERNAME }} + REGISTRY_TOKEN: ${{ secrets.DOCKER_TOKEN }} + needs: [amd64-push-dockerhub, arm64-push-dockerhub] diff --git a/.github/workflows/push_image_to_dockerhub.yml b/.github/workflows/push_image_to_dockerhub.yml deleted file mode 100644 index 3178e78e04..0000000000 --- a/.github/workflows/push_image_to_dockerhub.yml +++ /dev/null @@ -1,54 +0,0 @@ -# Build the new Docker image on every commit to the main branch and on every new tag. -# No caching is involved for the image build. The new image is then pushed to the Docker Hub. - -name: build-and-push-to-dockerhub - -on: - push: - branches: - - main - tags: - - "v[0-9]+.[0-9]+.[0-9]+*" - -jobs: - - build-and-push: - - # Only run this job on the main repository and not on forks - if: github.repository == 'aiidateam/aiida-core' - - runs-on: ubuntu-latest - timeout-minutes: 30 - - steps: - - - uses: actions/checkout@v2 - - - name: Docker meta - id: meta - uses: docker/metadata-action@v3 - with: - images: ${{ github.repository }} - tags: | - type=ref,event=branch - type=semver,pattern={{version}} - - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_TOKEN }} - - - name: Build and push - id: docker_build - uses: docker/build-push-action@v2 - with: - push: true - platforms: linux/amd64, linux/arm64 - tags: ${{ steps.meta.outputs.tags }} diff --git a/.gitignore b/.gitignore index 3cf188d3f3..4f7e25f265 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,6 @@ docs/source/reference/apidoc _sandbox pplot_out/ + +# docker +docker-bake.override.json diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 33c6a927d2..a85634a2c3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -42,7 +42,7 @@ repos: - id: yapf name: yapf types: [python] - exclude: &exclude_files > + exclude: | (?x)^( docs/.*| )$ @@ -74,6 +74,7 @@ repos: (?x)^( .github/.*| .molecule/.*| + .docker/.*| docs/.*| utils/.*| @@ -202,7 +203,11 @@ repos: entry: pylint types: [python] language: system - exclude: *exclude_files + exclude: | + (?x)^( + docs/.*| + .docker/.*| + )$ - id: dm-generate-all name: Update all requirements files diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 9085a5e3ab..0000000000 --- a/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM aiidateam/aiida-prerequisites:0.7.0 - -USER root - -ENV SETUP_DEFAULT_PROFILE true - -ENV PROFILE_NAME default -ENV USER_EMAIL aiida@localhost -ENV USER_FIRST_NAME Giuseppe -ENV USER_LAST_NAME Verdi -ENV USER_INSTITUTION Khedivial -ENV AIIDADB_BACKEND core.psql_dos - -# Copy and install AiiDA -COPY . aiida-core -RUN pip install ./aiida-core[atomic_tools] - -# Configure aiida for the user -COPY .docker/opt/configure-aiida.sh /opt/configure-aiida.sh -COPY .docker/my_init.d/configure-aiida.sh /etc/my_init.d/40_configure-aiida.sh - -# Use phusion baseimage docker init system. 
-CMD ["/sbin/my_init"] diff --git a/docs/source/intro/run_docker.rst index 071eb35dab..669dffae6c 100644 --- a/docs/source/intro/run_docker.rst +++ b/docs/source/intro/run_docker.rst @@ -15,7 +15,15 @@ This image contains a fully pre-configured AiiDA environment which makes it part .. grid:: 1 :gutter: 3 - .. grid-item-card:: Start container + .. grid-item-card:: Install Docker on your workstation or laptop + + To install Docker, please refer to the `official documentation `__. + + .. note:: + + If you are using Linux, you need root privileges to perform the `post-installation steps for the Docker Engine `__. + + .. grid-item-card:: Start container and use AiiDA interactively First, pull the image: @@ -27,23 +35,23 @@ This image contains a fully pre-configured AiiDA environment which makes it part .. parsed-literal:: - $ docker run -d --name aiida-container aiidateam/aiida-core:latest + $ docker run -it aiidateam/aiida-core:latest bash - You can use the following command to block until all services have started up: + You can specify a name for the container with the ``--name`` option for easier reference later on: - .. code-block:: console + .. parsed-literal:: - $ docker exec -t aiida-container wait-for-services + $ docker run -it --name aiida-container aiidateam/aiida-core:latest bash .. grid-item-card:: Check setup - The default profile is created under the ``aiida`` user, so to execute commands you must add the ``--user aiida`` option. + The profile named ``default`` is created under the ``aiida`` user. - For example, to check the verdi status, execute: + For example, to check the verdi status, execute the following command inside the container: .. code-block:: console - $ docker exec -t --user aiida aiida-container /bin/bash -l -c 'verdi status' + $ verdi status ✓ config dir: /home/aiida/.aiida ✓ profile: On profile default ✓ repository: /home/aiida/.aiida/repository/default @@ -51,24 +59,13 @@ This image contains a fully pre-configured AiiDA environment which makes it part ✓ rabbitmq: Connected as amqp://127.0.0.1?heartbeat=600 ✓ daemon: Daemon is running as PID 1795 since 2020-05-20 02:54:00 - .. grid-item-card:: Use container interactively - - To "enter" the container and run commands directly in the shell, use: - - .. code-block:: console - - $ docker exec -it --user aiida aiida-container /bin/bash - - This will drop you into the shell within the container as the user "aiida". - .. grid-item-card:: Persist data across different containers - If you stop the container and start it again, any data you created will persist. + If you stop the container (`docker stop` or simply `Ctrl+D` from container) and start it again, any data you created will persist. .. code-block:: console - $ docker stop aiida-container - $ docker start aiida-container + $ docker start -i aiida-container However, if you remove the container, **all data will be removed as well**. @@ -78,19 +75,24 @@ This image contains a fully pre-configured AiiDA environment which makes it part $ docker rm aiida-container The preferred way to persistently store data is to `create a volume `__. + To create a simple volume, run: .. code-block:: console - $ docker volume create my-data + $ docker volume create container-home-data Then make sure to mount that volume when running the aiida container: ..
parsed-literal:: - $ docker run -d --name aiida-container --mount source=my-data,target=/tmp/my_data aiidateam/aiida-core:latest + $ docker run -it --name aiida-container -v container-home-data:/home/aiida aiidateam/aiida-core:latest + + Starting the container with the above command ensures that any data stored in the ``/home/aiida`` path within the container is stored in the ``container-home-data`` volume and therefore persists even if the container is removed. + + To persistently store the Python packages installed in the container, use the ``--user`` flag when installing packages with pip; the packages will then be installed in the ``/home/aiida/.local`` path, which is mounted to the ``container-home-data`` volume. - Starting the container with the above command, ensures that any data stored in the ``/tmp/my_data`` path within the container is stored in the ``my-data`` volume and therefore persists even if the container is removed. + You can also mount a local directory instead of a volume, or mount to a different path inside the container; please refer to the `Docker documentation `__ for more information. .. button-ref:: intro:get_started:next :ref-type: ref diff --git a/pyproject.toml b/pyproject.toml index c4a3d20093..50fb889eca 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -101,7 +101,7 @@ notebook = [ ] pre-commit = [ "mypy==0.991", - "packaging==20.3", + "packaging~=20.9", "pre-commit~=2.2", "pylint~=2.17.4", "pylint-aiida~=0.1.1", From 3428eda87a0494c6642392397ad6c747adbde9b4 Mon Sep 17 00:00:00 2001 From: Jusong Yu Date: Tue, 12 Sep 2023 15:20:29 +0200 Subject: [PATCH 02/13] Devops: Upload artifact by PR from forks for docker workflow (#6119) Cherry-pick: aeaa90f5d3ec74d71167d88e8db2ff8c6f89e8bb --- .github/workflows/docker-build-test-upload.yml | 2 -- .github/workflows/docker.yml | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/docker-build-test-upload.yml b/.github/workflows/docker-build-test-upload.yml index b7c433114f..11ca7f5a05 100644 --- a/.github/workflows/docker-build-test-upload.yml +++ b/.github/workflows/docker-build-test-upload.yml @@ -53,7 +53,6 @@ jobs: name: aiida-core-base-${{ inputs.architecture }} path: /tmp/aiida-core-base-${{ inputs.architecture }}.tar retention-days: 3 - if: ${{ !github.event.pull_request.head.repo.fork }} - name: Upload aiida-core-with-services image as artifact 💾 uses: actions/upload-artifact@v3 with: name: aiida-core-with-services-${{ inputs.architecture }} path: /tmp/aiida-core-with-services-${{ inputs.architecture }}.tar retention-days: 3 - if: ${{ !github.event.pull_request.head.repo.fork }} diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c621d9e5d5..ddfee24615 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -37,7 +37,6 @@ jobs: if: ${{ !github.event.pull_request.head.repo.fork }} amd64-push-ghcr: - if: github.repository == 'aiidateam/aiida-core' uses: ./.github/workflows/docker-push.yml with: architecture: amd64 registry: ghcr.io secrets: REGISTRY_USERNAME: ${{ github.actor }} REGISTRY_TOKEN: ${{ secrets.GITHUB_TOKEN }} needs: [amd64-build] + if: ${{ !github.event.pull_request.head.repo.fork }} arm64-push-ghcr: uses: ./.github/workflows/docker-push.yml From 69fd7b570656c707868eeaa7c87c680341ab66a1 Mon Sep 17 00:00:00 2001 From: Jusong Yu Date: Fri, 22 Sep 2023 09:29:59 +0200 Subject: [PATCH 03/13] Devops: Follow-up docker build runner macOS-ARM64 (#6127) The buildjet arm64 runner comes with only a three-month trial, after which we would need to pay to use it.
The self-hosted runner is deployed on the macOS-arm64 machine located at PSI. Cherry-pick: 34be3b657a20cbc2f246dc296c42ca9e1b620875 --- .../workflows/docker-build-test-upload.yml | 21 +++++++++++++++---- .github/workflows/docker-merge-tags.yml | 8 +++---- .github/workflows/docker-push.yml | 5 +++-- .github/workflows/docker.yml | 2 +- 4 files changed, 25 insertions(+), 11 deletions(-) diff --git a/.github/workflows/docker-build-test-upload.yml b/.github/workflows/docker-build-test-upload.yml index 11ca7f5a05..99373e37a3 100644 --- a/.github/workflows/docker-build-test-upload.yml +++ b/.github/workflows/docker-build-test-upload.yml @@ -32,6 +32,18 @@ jobs: with: architecture: ${{ inputs.architecture }} + # Self-hosted runners share state (the whole VM) between runs + # Also, they might have running or stopped containers, + # which are not cleaned up by `docker system prune` + - name: Reset docker state and cleanup artifacts 🗑️ + if: ${{ inputs.platform != 'x86_64' }} + run: | + docker kill $(docker ps --quiet) || true + docker rm $(docker ps --all --quiet) || true + docker system prune --all --force + rm -rf /tmp/aiida-core/ + shell: bash + - name: Build image base and base-with-services (output image name aiida-core) 🛠 + # The order of the buildx bake files is important, as the second one will overwrite the first one + run: docker buildx bake -f docker-bake.hcl -f build.json --set *.platform=linux/${{ inputs.architecture }} --load @@ -44,19 +56,20 @@ jobs: - name: Save image as a tar for later use 💾 run: | - docker save ${{ env.OWNER }}/aiida-core-base -o /tmp/aiida-core-base-${{ inputs.architecture }}.tar - docker save ${{ env.OWNER }}/aiida-core-with-services -o /tmp/aiida-core-with-services-${{ inputs.architecture }}.tar + mkdir -p /tmp/aiida-core + docker save ${{ env.OWNER }}/aiida-core-base -o /tmp/aiida-core/aiida-core-base-${{ inputs.architecture }}.tar + docker save ${{ env.OWNER }}/aiida-core-with-services -o /tmp/aiida-core/aiida-core-with-services-${{ inputs.architecture }}.tar - name: Upload aiida-core-base image as artifact 💾 uses: actions/upload-artifact@v3 with: name: aiida-core-base-${{ inputs.architecture }} - path: /tmp/aiida-core-base-${{ inputs.architecture }}.tar + path: /tmp/aiida-core/aiida-core-base-${{ inputs.architecture }}.tar retention-days: 3 - name: Upload aiida-core-with-services image as artifact 💾 uses: actions/upload-artifact@v3 with: name: aiida-core-with-services-${{ inputs.architecture }} - path: /tmp/aiida-core-with-services-${{ inputs.architecture }}.tar + path: /tmp/aiida-core/aiida-core-with-services-${{ inputs.architecture }}.tar retention-days: 3 diff --git a/.github/workflows/docker-merge-tags.yml b/.github/workflows/docker-merge-tags.yml index a3e145b395..7b322a41d4 100644 --- a/.github/workflows/docker-merge-tags.yml +++ b/.github/workflows/docker-merge-tags.yml @@ -36,12 +36,12 @@ jobs: uses: actions/download-artifact@v3 with: name: ${{ inputs.registry }}-${{ matrix.image }}-amd64-tags - path: /tmp/ + path: /tmp/aiida-core - name: Download arm64 tags file 📥 uses: actions/download-artifact@v3 with: name: ${{ inputs.registry }}-${{ matrix.image }}-arm64-tags - path: /tmp/ + path: /tmp/aiida-core - name: Login to Container Registry 🔑 uses: docker/login-action@v2 with: registry: ${{ inputs.registry }} username: ${{ secrets.REGISTRY_USERNAME }} password: ${{ secrets.REGISTRY_TOKEN }} - name: Merge tags for the images of different arch 🔀 run: | - for arch_tag in $(cat /tmp/${{ matrix.image }}-amd64-tags.txt); do + for arch_tag in $(cat /tmp/aiida-core/${{ matrix.image }}-amd64-tags.txt); do tag=$(echo $arch_tag | sed
"s/:amd64-/:/") docker manifest create $tag --amend $arch_tag docker manifest push $tag done - for arch_tag in $(cat /tmp/${{ matrix.image }}-arm64-tags.txt); do + for arch_tag in $(cat /tmp/aiida-core/${{ matrix.image }}-arm64-tags.txt); do tag=$(echo $arch_tag | sed "s/:arm64-/:/") docker manifest create $tag --amend $arch_tag docker manifest push $tag diff --git a/.github/workflows/docker-push.yml b/.github/workflows/docker-push.yml index cbe50674fa..8a3f097813 100644 --- a/.github/workflows/docker-push.yml +++ b/.github/workflows/docker-push.yml @@ -85,12 +85,13 @@ jobs: docker push ${arch_tag} # write tag to file - echo ${arch_tag} >> /tmp/${{ matrix.image }}-${{ inputs.architecture }}-tags.txt + mkdir -p /tmp/aiida-core + echo ${arch_tag} >> /tmp/aiida-core/${{ matrix.image }}-${{ inputs.architecture }}-tags.txt done - name: Upload tags file ๐Ÿ“ค uses: actions/upload-artifact@v3 with: name: ${{ inputs.registry }}-${{ matrix.image }}-${{ inputs.architecture }}-tags - path: /tmp/${{ matrix.image }}-${{ inputs.architecture }}-tags.txt + path: /tmp/aiida-core/${{ matrix.image }}-${{ inputs.architecture }}-tags.txt retention-days: 3 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index ddfee24615..979823557e 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -33,7 +33,7 @@ jobs: uses: ./.github/workflows/docker-build-test-upload.yml with: architecture: arm64 - runsOn: buildjet-2vcpu-ubuntu-2204-arm + runsOn: ARM64 if: ${{ !github.event.pull_request.head.repo.fork }} amd64-push-ghcr: From 441c150c55ccd15bad5fc016fc71b9a29ae16982 Mon Sep 17 00:00:00 2001 From: Jusong Yu Date: Fri, 22 Sep 2023 10:29:53 +0200 Subject: [PATCH 04/13] Devops: Loosen trigger conditions for Docker build CI workflow (#6131) The docker build workflow was only activated when changes were made to either the `.docker` directory or `.github/workflows/docker*.yaml` files. However, changes in the `aiida` package could also break the build and so could pass by unnoticed. The trigger conditions are changed to instead trigger always except for changes to the `tests` and `docs` directories. 
Cherry-pick: 1b5b6692f2f9ab8b13d3d2469f854838364b4e38 --- .github/workflows/docker.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 979823557e..0fd6c50a0e 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -3,17 +3,17 @@ name: Build, test and push Docker Images on: pull_request: - paths: - - .docker/** - - .github/workflows/docker-*.yml + paths-ignore: + - "docs/**" + - "tests/**" push: branches: - main tags: - "v*" - paths: - - .docker/** - - .github/workflows/docker-*.yml + paths-ignore: + - "docs/**" + - "tests/**" workflow_dispatch: # https://docs.github.com/en/actions/using-jobs/using-concurrency From c724eeaed303d81c84df5f3087f6baab250736f6 Mon Sep 17 00:00:00 2001 From: Jusong Yu Date: Wed, 27 Sep 2023 19:26:30 +0200 Subject: [PATCH 05/13] Devops: Update the `.devcontainer` to use the new docker stack (#6139) Cherry-pick: 61828e26ad34ed592e8528ca58d68c1175afb926 --- .devcontainer/Dockerfile | 16 ------- .devcontainer/devcontainer.json | 15 ++++-- .devcontainer/docker-compose.yml | 80 +++++++++++++------------------- .devcontainer/post_create.sh | 4 -- 4 files changed, 43 insertions(+), 72 deletions(-) delete mode 100644 .devcontainer/Dockerfile delete mode 100644 .devcontainer/post_create.sh diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile deleted file mode 100644 index bde97605f6..0000000000 --- a/.devcontainer/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# Additions for dev container -FROM aiidateam/aiida-core:main - -# Add test dependencies (not installed in image) -RUN pip install ./aiida-core[tests,rest,docs,pre-commit] -# the `locate` command is needed by many tests -RUN apt-get update \ - && apt-get install -y mlocate \ - && rm -rf /var/lib/apt/lists/* - -# add aiida user -RUN /etc/my_init.d/10_create-system-user.sh - -# copy updated aiida configuration script -# this line can be deleted after the new script has been merged -COPY ../.docker/opt/configure-aiida.sh /opt/configure-aiida.sh diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 5bbcf39152..4dfd532583 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,9 +1,16 @@ { "dockerComposeFile": "docker-compose.yml", - "service": "aiida", - "workspaceFolder": "/home/aiida/aiida-core", - "postCreateCommand": "bash ./.devcontainer/post_create.sh", - "waitFor": "postCreateCommand", + "service": "daemon", + "workspaceFolder": "/workspaces/aiida-core", + "postCreateCommand": "/etc/init/aiida-prepare.sh", + "postStartCommand": "pip install -e /workspaces/aiida-core[tests,docs,rest,atomic_tools,pre-commit]", + "postAttachCommand": "verdi daemon start", + "waitFor": "postStartCommand", + "containerUser": "aiida", + "remoteUser": "aiida", + "remoteEnv": { + "HOME": "/home/aiida" + }, "customizations": { "vscode": { "extensions": ["ms-python.python", "eamodio.gitlens"] diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 380a0a4760..18e8d3e513 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -1,54 +1,38 @@ +--- version: '3.4' services: - rabbitmq: - image: rabbitmq:3.8.3-management - environment: - RABBITMQ_DEFAULT_USER: guest - RABBITMQ_DEFAULT_PASS: guest - ports: - - '5672:5672' - - '15672:15672' + database: + image: postgres:15 + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: password + POSTGRES_HOST_AUTH_METHOD: trust + healthcheck: + 
test: [ "CMD-SHELL", "pg_isready"] + interval: 5s + timeout: 5s + retries: 10 - healthcheck: - test: rabbitmq-diagnostics -q ping - interval: 30s - timeout: 30s - retries: 5 - networks: - - aiida + messaging: + image: rabbitmq:3.8.14-management + environment: + RABBITMQ_DEFAULT_USER: guest + RABBITMQ_DEFAULT_PASS: guest + healthcheck: + test: rabbitmq-diagnostics check_port_connectivity + interval: 30s + timeout: 30s + retries: 10 - postgres: - image: postgres:12 - ports: - - '5432:5432' - networks: - - aiida - environment: - POSTGRES_HOST_AUTH_METHOD: trust - - aiida: - #image: "aiidateam/aiida-core:main" - image: "aiida-core-dev" - build: - # need to add the parent directory to context to copy over new configure-aiida.sh - context: .. - dockerfile: .devcontainer/Dockerfile - user: aiida - environment: - DB_HOST: postgres - BROKER_HOST: rabbitmq - - # no need for /sbin/my_init - entrypoint: tail -f /dev/null - volumes: - - ..:/home/aiida/aiida-core:cached - networks: - - aiida - depends_on: - - rabbitmq - - postgres - -networks: - aiida: + daemon: + image: aiidateam/aiida-core-base:edge + user: aiida + entrypoint: tail -f /dev/null + environment: + SETUP_DEFAULT_AIIDA_PROFILE: 'true' + TZ: 'Europe/Zurich' + depends_on: + database: + condition: service_healthy diff --git a/.devcontainer/post_create.sh b/.devcontainer/post_create.sh deleted file mode 100644 index 71f8330853..0000000000 --- a/.devcontainer/post_create.sh +++ /dev/null @@ -1,4 +0,0 @@ - #!/bin/bash - -# configure aiida -/opt/configure-aiida.sh From a2b0af111363e2593db610047eb9e2f248c5d89a Mon Sep 17 00:00:00 2001 From: Jusong Yu Date: Thu, 28 Sep 2023 22:33:30 +0200 Subject: [PATCH 06/13] DevOps: amendment use aiida-core-base image from ghcr.io (#6141) Amendment to #6139, for unknown reason, docker pull is failed for docker.io on this repository. Using the docker registry ghcr.io works fine. Cherry-pick: 8d8b41dfaa0deca88ddd388feb61bca931644610 --- .devcontainer/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 18e8d3e513..038c636856 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -27,7 +27,7 @@ services: retries: 10 daemon: - image: aiidateam/aiida-core-base:edge + image: ghcr.io/aiidateam/aiida-core-base:edge user: aiida entrypoint: tail -f /dev/null environment: From a9b03564ad99bcfb91b8192789fe5e3b5e2fa598 Mon Sep 17 00:00:00 2001 From: Jusong Yu Date: Tue, 10 Oct 2023 10:30:07 +0200 Subject: [PATCH 07/13] Docs: Update the image name for docker image (#6143) It was still pointing to the old name instead of the new `aiida-core-with-services`. Cherry-pick: 5ca609beecbed0282ac4e029990fe67a8dbaf6f3 --- docs/source/intro/run_docker.rst | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/docs/source/intro/run_docker.rst b/docs/source/intro/run_docker.rst index 669dffae6c..70c4720132 100644 --- a/docs/source/intro/run_docker.rst +++ b/docs/source/intro/run_docker.rst @@ -5,12 +5,12 @@ Run AiiDA via a Docker image **************************** -The AiiDA team maintains a `Docker `__ image on `Docker Hub `__. +The AiiDA team maintains a `Docker `__ image on `Docker Hub `__. This image contains a fully pre-configured AiiDA environment which makes it particularly useful for learning and testing purposes. .. 
caution:: - All data stored in a container will persist only over the lifetime of that particular container unless you use volumes (see instructions below). + All data stored in a container will persist only over the lifetime of that particular container (i.e., removing the container will also purge the data) unless you use volumes (see instructions below). .. grid:: 1 :gutter: 3 @@ -19,29 +19,19 @@ This image contains a fully pre-configured AiiDA environment which makes it part To install Docker, please refer to the `official documentation `__. - .. note:: - - If you are using Linux, you need to have root privileges to do `post-installation steps for the Docker Engine `__. - .. grid-item-card:: Start container and use AiiDA interactively - First, pull the image: - - .. parsed-literal:: - - $ docker pull aiidateam/aiida-core:latest - - Then start the container with: + Start the container with (replace ``latest`` with the version you want to use, check the `Docker Hub `__ for available tags/versions): .. parsed-literal:: - $ docker run -it aiidateam/aiida-core:latest bash + $ docker run -it aiidateam/aiida-core-with-services:latest bash You can specify a name for the container with the ``--name`` option for easier reference later on: .. parsed-literal:: - $ docker run -it --name aiida-container aiidateam/aiida-core:latest bash + $ docker run -it --name aiida-container aiidateam/aiida-core-with-services:latest bash .. grid-item-card:: Check setup @@ -59,6 +49,16 @@ This image contains a fully pre-configured AiiDA environment which makes it part โœ“ rabbitmq: Connected as amqp://127.0.0.1?heartbeat=600 โœ“ daemon: Daemon is running as PID 1795 since 2020-05-20 02:54:00 + .. grid-item-card:: Copy files from your computer to the container + + To copy files from your computer to the container, use the ``docker cp`` command. + + For example, to copy a file named ``test.txt`` from your current working directory to the ``/home/aiida`` path in the container, run: + + .. code-block:: console + + $ docker cp test.txt aiida-container:/home/aiida + .. grid-item-card:: Persist data across different containers If you stop the container (`docker stop` or simply `Ctrl+D` from container) and start it again, any data you created will persist. From f16ff0d6ca56c2d59aa9b7ea2ffad41cae19bb36 Mon Sep 17 00:00:00 2001 From: Jusong Yu Date: Sat, 4 Nov 2023 22:51:35 +0100 Subject: [PATCH 08/13] Docker: Pass environment variable to aiida-prepare script (#6169) Set `with-contenv` such that environment variables are forwarded. Without this, settings like the work dir of `localhost` will be set incorrectly and will cause calculations to fail. 
Cherry-pick: 6123f526cfcec26bea96f60cafeb2c6984da104c --- .../aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/up | 2 ++ .docker/tests/test_aiida.py | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/up b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/up index 60e82d7e43..b1045997fd 100644 --- a/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/up +++ b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-prepare/up @@ -1,4 +1,6 @@ #!/command/execlineb -S0 +with-contenv + foreground { s6-echo "Calling /etc/init/aiida-prepare" } /etc/init/aiida-prepare.sh diff --git a/.docker/tests/test_aiida.py b/.docker/tests/test_aiida.py index 803bc855d7..0ab8860231 100644 --- a/.docker/tests/test_aiida.py +++ b/.docker/tests/test_aiida.py @@ -30,3 +30,11 @@ def test_verdi_status(aiida_exec, container_user, timeout): # check that we have suppressed the warnings assert 'Warning' not in output + + +def test_computer_setup_success(aiida_exec, container_user, timeout): + time.sleep(timeout) + output = aiida_exec('verdi computer test localhost', user=container_user).decode().strip() + + assert 'Success' in output + assert 'Failed' not in output From 1d6cf4f0a329fe58a96731dedfc3d75936350a98 Mon Sep 17 00:00:00 2001 From: Jusong Yu Date: Thu, 9 Nov 2023 13:34:46 +0100 Subject: [PATCH 09/13] Docker: Add folders that automatically run scripts before/after daemon start (#6170) In order to simplify the implementation of using the `aiida-core` image as the base for customized images, the `run-before-daemon-start` and `run-after-daemon-start` script folders are created. Any executables in these two folders will be executed before and after the AiiDA daemon is started in the container, respectively. The standard linux `run-parts` tool is used to scan these folders for files, which are run in the lexical sort order of their names, according to the C/POSIX locale character collation rules Cherry-pick: 5fe6f4fda1eaf5a1e05e87063aa4eafa47fa1c07 --- .docker/aiida-core-base/Dockerfile | 2 ++ .../dependencies.d/run-before-daemon-start | 0 .../dependencies.d/aiida-daemon-start | 0 .../s6-rc.d/run-after-daemon-start/dependencies.d/base | 0 .../s6-assets/s6-rc.d/run-after-daemon-start/timeout-up | 1 + .../s6-assets/s6-rc.d/run-after-daemon-start/type | 1 + .../s6-assets/s6-rc.d/run-after-daemon-start/up | 6 ++++++ .../run-before-daemon-start/dependencies.d/aiida-prepare | 0 .../s6-rc.d/run-before-daemon-start/dependencies.d/base | 0 .../s6-assets/s6-rc.d/run-before-daemon-start/timeout-up | 1 + .../s6-assets/s6-rc.d/run-before-daemon-start/type | 1 + .../s6-assets/s6-rc.d/run-before-daemon-start/up | 6 ++++++ .../s6-rc.d/user/contents.d/run-after-daemon-start | 0 .../s6-rc.d/user/contents.d/run-before-daemon-start | 0 14 files changed, 18 insertions(+) create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/run-before-daemon-start create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/dependencies.d/aiida-daemon-start create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/dependencies.d/base create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/timeout-up create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/type create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/up create mode 100644 
.docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/dependencies.d/aiida-prepare create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/dependencies.d/base create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/timeout-up create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/type create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/up create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/user/contents.d/run-after-daemon-start create mode 100644 .docker/aiida-core-base/s6-assets/s6-rc.d/user/contents.d/run-before-daemon-start diff --git a/.docker/aiida-core-base/Dockerfile b/.docker/aiida-core-base/Dockerfile index 17307203ec..085e100e07 100644 --- a/.docker/aiida-core-base/Dockerfile +++ b/.docker/aiida-core-base/Dockerfile @@ -161,6 +161,8 @@ RUN mkdir -p "${CONDA_DIR}/etc/conda/activate.d" && \ COPY --chown="${SYSTEM_UID}:${SYSTEM_GID}" s6-assets/config-quick-setup.yaml "/aiida/assets/config-quick-setup.yaml" COPY s6-assets/s6-rc.d /etc/s6-overlay/s6-rc.d COPY s6-assets/init /etc/init +RUN mkdir /etc/init/run-before-daemon-start && \ + mkdir /etc/init/run-after-daemon-start # Otherwise will stuck on oneshot services # https://github.com/just-containers/s6-overlay/issues/467 diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/run-before-daemon-start b/.docker/aiida-core-base/s6-assets/s6-rc.d/aiida-daemon-start/dependencies.d/run-before-daemon-start new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/dependencies.d/aiida-daemon-start b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/dependencies.d/aiida-daemon-start new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/dependencies.d/base b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/timeout-up b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/timeout-up new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/timeout-up @@ -0,0 +1 @@ +0 diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/type b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/type new file mode 100644 index 0000000000..bdd22a1850 --- /dev/null +++ b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/type @@ -0,0 +1 @@ +oneshot diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/up b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/up new file mode 100644 index 0000000000..d2e95d5190 --- /dev/null +++ b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-after-daemon-start/up @@ -0,0 +1,6 @@ +#!/command/execlineb -P + +with-contenv + +foreground { s6-echo "Calling /etc/init/run-after-daemon-start" } +run-parts --regex=".*" /etc/init/run-after-daemon-start/ diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/dependencies.d/aiida-prepare b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/dependencies.d/aiida-prepare new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/.docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/dependencies.d/base b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/dependencies.d/base new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/timeout-up b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/timeout-up new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/timeout-up @@ -0,0 +1 @@ +0 diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/type b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/type new file mode 100644 index 0000000000..bdd22a1850 --- /dev/null +++ b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/type @@ -0,0 +1 @@ +oneshot diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/up b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/up new file mode 100644 index 0000000000..3ff7dc0360 --- /dev/null +++ b/.docker/aiida-core-base/s6-assets/s6-rc.d/run-before-daemon-start/up @@ -0,0 +1,6 @@ +#!/command/execlineb -P + +with-contenv + +foreground { s6-echo "Calling /etc/init/run-before-daemon-start" } +run-parts --regex=".*" /etc/init/run-before-daemon-start/ diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/user/contents.d/run-after-daemon-start b/.docker/aiida-core-base/s6-assets/s6-rc.d/user/contents.d/run-after-daemon-start new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.docker/aiida-core-base/s6-assets/s6-rc.d/user/contents.d/run-before-daemon-start b/.docker/aiida-core-base/s6-assets/s6-rc.d/user/contents.d/run-before-daemon-start new file mode 100644 index 0000000000..e69de29bb2 From 498c65c670c5c37350bdfe2849b1a276a7ad5c7b Mon Sep 17 00:00:00 2001 From: Jusong Yu Date: Tue, 14 Nov 2023 14:17:40 +0100 Subject: [PATCH 10/13] Devops: Trigger Docker image build when pushing to `support/*` branch (#6175) Cherry-pick: 9be2f25002788b57e2906256f90243a95b9f75ec --- .github/workflows/docker.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 0fd6c50a0e..877482a463 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -9,6 +9,7 @@ on: push: branches: - main + - 'support/**' tags: - "v*" paths-ignore: From 2a373f39c451c6922a90144a4050c21dff5bd0ab Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 27 Jul 2023 17:59:31 +0200 Subject: [PATCH 11/13] Tests: Fix `StructureData` test breaking for recent `pymatgen` versions (#6088) The roundtrip test for the `StructureData` class using `pymatgen` structures as a go between started failing. The structure is constructed from a CIF file with partial occupancies. The `label` attribute of each site in the pymatgen structure, as returned by `as_dict` would look like the following, originally: ['Bi', 'Bi', 'Te:0.667, Se:0.333', 'Te:0.667, Se:0.333', 'Te:0.667, Se:0.333'] ['Bi', 'Bi', 'Te:0.667, Se:0.333', 'Te:0.667, Se:0.333', 'Te:0.667, Se:0.333'] In commit 63bbd23b57ca2c68eaca07e4915a70ef66e13405, released with v2023.7.14, the CIF parsing logic in `pymatgen` was updated to include parsing of the atom site labels and store them on the site `label` attribute. 
This would result in the following site labels for the structure parsed directly from the CIF and the one after roundtrip through `StructureData`: ['Bi', 'Bi', 'Se1', 'Se1', 'Se1'] [None, None, None, None, None] The roundtrip returned `None` values because in the previously mentioned commit, the newly added `label` property would return `None` instead of the species label that used to be returned before. This behavior was corrected in commit 9a98f4ce722299d545f2af01a9eaf1c37ff7bd53 and released with v2023.7.20, after which the new behavior is the following: ['Bi', 'Bi', 'Se1', 'Se1', 'Se1'] ['Bi', 'Bi', 'Te:0.667, Se:0.333', 'Te:0.667, Se:0.333', 'Te:0.667, Se:0.333'] The site labels parsed from the CIF are not maintained in the roundtrip because the `StructureData` does not store them. Therefore when the final pymatgen structure is created from it, the `label` is `None` and so defaults to the species name. Since the label information is not persisted in the `StructureData` it is not guaranteed to be maintained in the roundtrip and so it is excluded from the test. Cherry-pick: d1d64e8004c31209488f71a160a4f4824d02c081 --- tests/test_dataclasses.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py index 74c2485783..551bc0716d 100644 --- a/tests/test_dataclasses.py +++ b/tests/test_dataclasses.py @@ -2173,6 +2173,16 @@ def test_1(self): dict1 = pymatgen_struct.as_dict() dict2 = pymatgen_struct_roundtrip.as_dict() + # In pymatgen v2023.7.14 the CIF parsing was updated to include the parsing to atomic site labels. However, this + # information is not stored in the ``StructureData`` and so the structure after the roundtrip uses the default + # which is the specie name. The latter is correct in that it reflects the partial occupancies, but it differs + # from the labels parsed from the CIF which is simply parsed as ``Se1`` causing the test to fail. Since the + # site label information is not stored in the ``StructureData`` it is not possible to preserve it in the + # roundtrip and so it is excluded from the check. + for dictionary in [dict1, dict2]: + for site in dictionary['sites']: + site.pop('label', None) + for i in dict1['sites']: i['abc'] = [round(j, 2) for j in i['abc']] for i in dict2['sites']: @@ -2192,7 +2202,7 @@ def recursively_compare_values(left, right): elif isinstance(left, float): testing.assert_almost_equal(left, right) else: - assert left == right, f'{value} is not {right}' + assert left == right, f'{left} is not {right}' recursively_compare_values(dict1, dict2) From f12b8ea42c28091d1c9366641dcf10cd3c51ba5b Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Wed, 6 Sep 2023 07:52:36 +0200 Subject: [PATCH 12/13] Dependencies: Add compatibility for `pymatgen>=v2023.9.2` (#6109) As of v2023.9.2, the ``properties`` argument of the `Specie` class is removed and the ``spin`` argument should be used instead. See: https://github.com/materialsproject/pymatgen/commit/118c245d6082fe0b13e19d348fc1db9c0d512019 The ``spin`` argument was introduced in v2023.6.28. See: https://github.com/materialsproject/pymatgen/commit/9f2b3939af45d5129e0778d371d814811924aeb6 Instead of removing support for versions older than v2023.6.28 the code is updated to be able to deal with the new version where `properties` is no longer supported. 
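For illustration only (not part of the patch), here is a minimal standalone sketch of the same compatibility pattern the change applies inside ``StructureData``: try the newer ``spin`` keyword first and fall back to ``properties`` on older pymatgen releases. The helper name ``make_spin_specie`` is hypothetical.

    from pymatgen.core.periodic_table import Specie

    def make_spin_specie(symbol, oxidation_state, spin):
        """Create a ``Specie`` carrying a spin across old and new pymatgen APIs."""
        try:
            # pymatgen >= v2023.6.28 accepts ``spin``; from v2023.9.2 it is the only option.
            return Specie(symbol, oxidation_state, spin=spin)
        except TypeError:
            # Older releases only understand the ``properties`` dictionary.
            return Specie(symbol, oxidation_state, properties={'spin': spin})

    iron_spin_up = make_spin_specie('Fe', 0, spin=1)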
Cherry-pick: 4e0e7d8e9fd10c4adc3630cf24cebdf749f95351 --- aiida/orm/nodes/data/structure.py | 34 ++++++++++++++++++++------- requirements/requirements-py-3.11.txt | 2 +- tests/test_dataclasses.py | 19 +++++++++++---- 3 files changed, 40 insertions(+), 15 deletions(-) diff --git a/aiida/orm/nodes/data/structure.py b/aiida/orm/nodes/data/structure.py index 8f8dc95e47..20800a5497 100644 --- a/aiida/orm/nodes/data/structure.py +++ b/aiida/orm/nodes/data/structure.py @@ -834,7 +834,17 @@ def build_kind_name(species_and_occu): species = list(species_and_occu.keys()) occupations = list(species_and_occu.values()) - has_spin = any(specie.as_dict().get('properties', {}).get('spin', 0) != 0 for specie in species) + # As of v2023.9.2, the ``properties`` argument is removed and the ``spin`` argument should be used. + # See: https://github.com/materialsproject/pymatgen/commit/118c245d6082fe0b13e19d348fc1db9c0d512019 + # The ``spin`` argument was introduced in v2023.6.28. + # See: https://github.com/materialsproject/pymatgen/commit/9f2b3939af45d5129e0778d371d814811924aeb6 + has_spin_attribute = hasattr(species[0], '_spin') + + if has_spin_attribute: + has_spin = any(specie.spin != 0 for specie in species) + else: + has_spin = any(specie.as_dict().get('properties', {}).get('spin', 0) != 0 for specie in species) + has_partial_occupancies = (len(occupations) != 1 or occupations[0] != 1.0) if has_partial_occupancies and has_spin: @@ -847,7 +857,10 @@ def build_kind_name(species_and_occu): # If there is spin, we can only have a single specie, otherwise we would have raised above specie = species[0] - spin = specie.as_dict().get('properties', {}).get('spin', 0) + if has_spin_attribute: + spin = specie.spin + else: + spin = specie.as_dict().get('properties', {}).get('spin', 0) if spin < 0: kind_name += '1' @@ -1846,13 +1859,16 @@ def _get_object_pymatgen_structure(self, **kwargs): kind = self.get_kind(site.kind_name) if len(kind.symbols) != 1 or (len(kind.weights) != 1 or sum(kind.weights) < 1.): raise ValueError('Cannot set partial occupancies and spins at the same time') - species.append( - Specie( - kind.symbols[0], - oxidation_state, - properties={'spin': -1 if kind.name.endswith('1') else 1 if kind.name.endswith('2') else 0} - ) - ) + spin = -1 if kind.name.endswith('1') else 1 if kind.name.endswith('2') else 0 + try: + specie = Specie(kind.symbols[0], oxidation_state, properties={'spin': spin}) # pylint: disable=unexpected-keyword-arg + except TypeError: + # As of v2023.9.2, the ``properties`` argument is removed and the ``spin`` argument should be used. + # See: https://github.com/materialsproject/pymatgen/commit/118c245d6082fe0b13e19d348fc1db9c0d512019 + # The ``spin`` argument was introduced in v2023.6.28. 
+ # See: https://github.com/materialsproject/pymatgen/commit/9f2b3939af45d5129e0778d371d814811924aeb6 + specie = Specie(kind.symbols[0], oxidation_state, spin=spin) # pylint: disable=unexpected-keyword-arg + species.append(specie) else: # case when no spin are defined for site in self.sites: diff --git a/requirements/requirements-py-3.11.txt b/requirements/requirements-py-3.11.txt index 92afc7c8b0..398e7d5fdc 100644 --- a/requirements/requirements-py-3.11.txt +++ b/requirements/requirements-py-3.11.txt @@ -133,7 +133,7 @@ pycparser==2.21 pydantic==1.10.9 pydata-sphinx-theme==0.8.1 pygments==2.15.1 -pymatgen==2023.5.31 +pymatgen==2023.9.2 pympler==0.9 pymysql==0.9.3 pynacl==1.5.0 diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py index 551bc0716d..954dea434d 100644 --- a/tests/test_dataclasses.py +++ b/tests/test_dataclasses.py @@ -2247,10 +2247,16 @@ def test_partial_occ_and_spin(self): from pymatgen.core.periodic_table import Specie from pymatgen.core.structure import Structure - Fe_spin_up = Specie('Fe', 0, properties={'spin': 1}) - Mn_spin_up = Specie('Mn', 0, properties={'spin': 1}) - Fe_spin_down = Specie('Fe', 0, properties={'spin': -1}) - Mn_spin_down = Specie('Mn', 0, properties={'spin': -1}) + try: + Fe_spin_up = Specie('Fe', 0, spin=1) # pylint: disable=unexpected-keyword-arg + Mn_spin_up = Specie('Mn', 0, spin=1) # pylint: disable=unexpected-keyword-arg + Fe_spin_down = Specie('Fe', 0, spin=-1) # pylint: disable=unexpected-keyword-arg + Mn_spin_down = Specie('Mn', 0, spin=-1) # pylint: disable=unexpected-keyword-arg + except TypeError: + Fe_spin_up = Specie('Fe', 0, properties={'spin': 1}) # pylint: disable=unexpected-keyword-arg + Mn_spin_up = Specie('Mn', 0, properties={'spin': 1}) # pylint: disable=unexpected-keyword-arg + Fe_spin_down = Specie('Fe', 0, properties={'spin': -1}) # pylint: disable=unexpected-keyword-arg + Mn_spin_down = Specie('Mn', 0, properties={'spin': -1}) # pylint: disable=unexpected-keyword-arg FeMn1 = Composition({Fe_spin_up: 0.5, Mn_spin_up: 0.5}) FeMn2 = Composition({Fe_spin_down: 0.5, Mn_spin_down: 0.5}) a = Structure( @@ -2440,7 +2446,10 @@ def test_roundtrip_spins(self): b = a.get_pymatgen(add_spin=True) # check the spins - assert [s.as_dict()['properties']['spin'] for s in b.species] == [-1, -1, -1, -1, 1, 1, 1, 1] + try: + assert [s.as_dict()['spin'] for s in b.species] == [-1, -1, -1, -1, 1, 1, 1, 1] + except KeyError: + assert [s.as_dict()['properties']['spin'] for s in b.species] == [-1, -1, -1, -1, 1, 1, 1, 1] # back to StructureData c = StructureData(pymatgen=b) assert c.get_site_kindnames() == ['Mn1', 'Mn1', 'Mn1', 'Mn1', 'Mn2', 'Mn2', 'Mn2', 'Mn2'] From 770ef25fe35a4ca74e7d44a3f4156a4843bc452c Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Tue, 5 Sep 2023 22:09:33 +0200 Subject: [PATCH 13/13] Tests: Make `PsqlDosStorage` profile unload test more robust (#6115) The `test_unload_profile` test verifies that if a loaded profile is unloaded, it properly relinquishes of the session that is maintained by sqlalchemy. It did so by checking that after unloading, there were no sessions being referenced. However, this would fail sometimes, because another session may still be held on to, even though that session had nothing to do with the test. A more robust test is simply to check that after unloading, there is exactly one less session being held on to. 
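Schematically (a sketch, not the test itself), the robust pattern is to record the session count before the operation and assert a relative change of exactly one, instead of demanding that no sessions remain. The helper ``assert_releases_one_session`` and its ``unload`` callable are hypothetical names.

    import gc

    from sqlalchemy.orm.session import _sessions  # private registry of live sessions, as used in the test

    def assert_releases_one_session(unload):
        """Assert that calling ``unload`` releases exactly one sqlalchemy session."""
        # Run the garbage collector first so sessions that are only kept alive by
        # reference cycles do not skew the baseline count.
        gc.collect()
        before = len(_sessions)

        unload()

        # Compare against the relative baseline rather than requiring zero sessions,
        # keeping the check independent of sessions held elsewhere in the test suite.
        assert len(_sessions) == before - 1, str(_sessions)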
Cherry-pick: 1c72eac1f91e02bc464c66328ea74911762b94fd --- tests/storage/psql_dos/test_backend.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/storage/psql_dos/test_backend.py b/tests/storage/psql_dos/test_backend.py index 7c98f801da..1adb1bd5c8 100644 --- a/tests/storage/psql_dos/test_backend.py +++ b/tests/storage/psql_dos/test_backend.py @@ -147,16 +147,23 @@ def test_unload_profile(): This is a regression test for #5506. """ + import gc + from sqlalchemy.orm.session import _sessions # pylint: disable=import-outside-toplevel + # Run the garbage collector to ensure any lingering unrelated sessions do not cause the test to fail. + gc.collect() + # Just running the test suite itself should have opened at least one session - assert len(_sessions) != 0, str(_sessions) + current_sessions = len(_sessions) + assert current_sessions != 0, str(_sessions) manager = get_manager() profile_name = manager.get_profile().name try: manager.unload_profile() - assert len(_sessions) == 0, str(_sessions) + # After unloading, the session should have been cleared, so we should have one less + assert len(_sessions) == current_sessions - 1, str(_sessions) finally: manager.load_profile(profile_name)