diff --git a/.buildkite/deploy.sh b/.buildkite/deploy.sh deleted file mode 100755 index 34addd6b21..0000000000 --- a/.buildkite/deploy.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash - -if [[ $# -ne 1 ]]; then - echo "[deploy.sh] Environment to deploy to has not been specified!" - echo "[deploy.sh] Exiting." - exit 1 -else - DEPLOY_ENV="$1" - echo "[deploy.sh] Deploying to '$DEPLOY_ENV'" -fi - -# ----------------------------------------------------------------------------- -# sanity checks -# ----------------------------------------------------------------------------- - -echo "[deploy.sh] Sanity checking env variables" - -if [ -n "$PLUTUS_CI_AWS_ACCESS_KEY_ID" ]; then - echo "[deploy.sh] PLUTUS_CI_AWS_ACCESS_KEY_ID is set" -else - echo "[deploy.sh] PLUTUS_CI_AWS_ACCESS_KEY_ID is NOT set" - exit 1 -fi - -if [ -n "$PLUTUS_CI_AWS_SECRET_ACCESS_KEY" ]; then - echo "[deploy.sh] PLUTUS_CI_AWS_SECRET_ACCESS_KEY is set" -else - echo "[deploy.sh] PLUTUS_CI_AWS_SECRET_ACCESS_KEY is NOT set" - exit 1 -fi - -if [ -n "$PLUTUS_CI_PRIVATE_SSH" ]; then - echo "[deploy.sh] PLUTUS_CI_PRIVATE_SSH is set" -else - echo "[deploy.sh] PLUTUS_CI_PRIVATE_SSH is NOT set" - exit 1 -fi - -# ----------------------------------------------------------------------------- -# setup -# ----------------------------------------------------------------------------- - -export AWS_ACCESS_KEY_ID="$PLUTUS_CI_AWS_ACCESS_KEY_ID" -export AWS_SECRET_ACCESS_KEY="$PLUTUS_CI_AWS_SECRET_ACCESS_KEY" - -echo "[deploy.sh] Adding ssh key" -eval "$(ssh-agent)" -ssh-add - <<< "${PLUTUS_CI_PRIVATE_SSH}" -ssh-add -l - -# ----------------------------------------------------------------------------- -# starting deployment -# ----------------------------------------------------------------------------- - -cd deployment && nix-shell --argstr rev "$BUILDKITE_COMMIT" -A "$DEPLOY_ENV" --run "deploy" diff --git a/.gitattributes b/.gitattributes index bbe135acb7..8f4ae11108 100644 --- a/.gitattributes +++ b/.gitattributes 
@@ -2,7 +2,6 @@ nix/pkgs/haskell/materialized*/**/*.nix linguist-generated=true # linguist gets confused by PIR files, and thinks they make up a lot of our source! *.pir linguist-detectable=false -stubs/plutus-ghc-stub/** linguist-vendored=true # Large HTML files inside 'notes' are dominating our repoistory language # estimate. notes/**/*.html linguist-documentation diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 97e057b636..82aca06aec 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -6,14 +6,6 @@ labels: bug assignees: '' --- - -## Area - -[] *Plutus Foundation* Related to the GHC plugin, Haskell-to-Plutus compiler, on-chain code -[] *Plutus Application Framework* Related to the Plutus application backend (PAB), emulator, Plutus libraries -[] *Marlowe* Related to Marlowe -[] *Other* Any other topic (Playgrounds, etc.) - ## Summary A clear and specific description of what the bug is. diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml deleted file mode 100644 index c83bfb8d75..0000000000 --- a/.github/workflows/benchmark.yml +++ /dev/null @@ -1,43 +0,0 @@ -name: Benchmarking -on: - issue_comment: - types: [created] - -jobs: - benchmark: - runs-on: ubuntu-latest - env: - BUILDKITE_API_ACCESS_TOKEN: ${{ secrets.BUILDKITE_API_ACCESS_TOKEN }} - steps: - # - # Checkout the repository so that we can use scripts from it - # - - name: Checkout - uses: actions/checkout@v2 - - # - # Check for '/benchmark' comments on a PR - # - - uses: khan/pull-request-comment-trigger@master - id: check - with: - trigger: '/benchmark' - reaction: rocket - env: - GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' - - # - # Extract the branch of that the PR the comment was added to belongs to - # - - uses: xt0rted/pull-request-comment-branch@v1 - if: steps.check.outputs.triggered == 'true' - id: comment-branch - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - - # - # Trigger the 
buildkite pipeline IF the 'benchmark' command was found - # - - run: | - ./scripts/trigger-buildkite-pipeline.sh ${{ steps.comment-branch.outputs.head_ref }} ${{ github.event.issue.number }} - if: steps.check.outputs.triggered == 'true' diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index ce7385c90a..f3ada70649 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -2,9 +2,9 @@ name: "Deploy" on: push: branches: - - marlowe-dist-demo # production, TODO this should be from the marlowe-production branch - - master # staging - # TODO need branches for wyohack and plutus-production + # Commented out until plutus-ops is updated for the repo split + # - main # staging + - fake-branch-which-will-not-exist-prior-to-ops-update jobs: deploy: runs-on: ubuntu-latest diff --git a/.github/workflows/flakes-update.yml b/.github/workflows/flakes-update.yml deleted file mode 100644 index 338ec3bbd4..0000000000 --- a/.github/workflows/flakes-update.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Automated Nix flakes-managed dependency updates -on: - schedule: - # * is a special character in YAML so you have to quote this string - # run this every day at 4:00am - - cron: '0 4 * * *' -jobs: - nix-flakes-updater: - name: 'Create PRs for Nix flakes-managed dependencies' - runs-on: ubuntu-latest - # Borrowed from https://github.com/Mic92/dotfiles/blob/9c2fc2cc98021dd8d42bbb714278785a9a6757c2/.github/workflows/upgrade-flakes.yml - steps: - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v14 - with: - install_url: https://github.com/numtide/nix-flakes-installer/releases/download/nix-2.4pre20210415_76980a1/install - extra_nix_config: | - experimental-features = nix-command flakes - - name: Make changes to pull request - run: nix flake update - - name: Create Pull Request - id: cpr - uses: peter-evans/create-pull-request@v3 - with: - commit-message: Update flakes - committer: GitHub - author: ${{ github.actor }} <${{ github.actor 
}}@users.noreply.github.com> - signoff: false - branch: flake-updates - delete-branch: true - title: 'Upgrade flakes' - body: | - Update report - - Updated with *today's* date - - Auto-generated by [create-pull-request][1] - [1]: https://github.com/peter-evans/create-pull-request - - name: Check outputs - run: | - echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}" - echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}" diff --git a/.gitignore b/.gitignore index 1e5eaeab2f..c49e24f99f 100644 --- a/.gitignore +++ b/.gitignore @@ -47,9 +47,6 @@ stack.yaml.lock *.prof .liquid/ -# Agda -*.agdai - # Python __pycache__ diff --git a/ARCHITECTURE.adoc b/ARCHITECTURE.adoc index 897f15d21b..9eed2127fc 100644 --- a/ARCHITECTURE.adoc +++ b/ARCHITECTURE.adoc @@ -1,32 +1,13 @@ -= Architecture of the Plutus project += Architecture of the Plutus apps project :email: plutus@iohk.io :author: Input Output HK Limited :toc: left :reproducible: -This document is an overview of how the software in the Plutus project is +This document is an overview of how the software in the Plutus apps project is structured. Most things are in direct subdirectories of the root, so we will work through those in conceptual order. -== Plutus Core - -Plutus Core is the language that actually goes on the blockchain. Consequently -this is the absolute core of the codebase, and everything depends on it. -It also includes the Plutus IR intermediary language. - -include::plutus-core/ARCHITECTURE.adoc[] - -== Plutus Tx - -Plutus Tx is how we refer to the subset of Haskell which we compile into Plutus -Core. This is how users actually write Plutus contracts: they write Haskell -programs, part of which is compiled into Plutus Core. The rest of the program -can then use this compiled code when submitting transactions. 
- -include::plutus-tx/ARCHITECTURE.adoc[] - -include::plutus-tx-plugin/ARCHITECTURE.adoc[] - == Ledger To write actual Plutus contracts we need a model of the ledger that we're @@ -56,70 +37,26 @@ include::plutus-contract/ARCHITECTURE.adoc[] include::plutus-use-cases/ARCHITECTURE.adoc[] -== Marlowe +== Playground -Although conceptually separate from Plutus, a fair amount of Marlowe-related -code is in the Plutus repository. This is for two different reasons: - -. We have an implementation of Marlowe on top of Plutus, which depends on the -other code in the repository. -. The Marlowe Playground shares code and deployment infrastructure with the -Plutus Playground. - -include::marlowe/ARCHITECTURE.adoc[] - -include::marlowe-symbolic/ARCHITECTURE.adoc[] - -include::marlowe-actus/ARCHITECTURE.adoc[] - -== Playgrounds - -The Plutus/Marlowe Playgrounds are our web-based environment for developing and -testing basic Plutus and Marlowe contracts. That means they're the main way that +The Plutus Playground is our web-based environment for developing and +testing basic Plutus contracts. That means it's the main way that anyone outside the team has interacted with out product! include::playground-common/ARCHITECTURE.adoc[] include::plutus-playground-server/ARCHITECTURE.adoc[] -include::marlowe-playground-server/ARCHITECTURE.adoc[] - include::plutus-playground-client/ARCHITECTURE.adoc[] -include::marlowe-playground-client/ARCHITECTURE.adoc[] - -include::playground-common/ARCHITECTURE.adoc[] - include::web-common/ARCHITECTURE.adoc[] -include::deployment/ARCHITECTURE.adoc[] - == Smart Contract Backend The smart contract backend provides the runtime environment for compiled Plutus contracts. It manages the state of contract instances and brokers messages between them and the nodes, users, and wallets that they interact with. 
include::plutus-pab/ARCHITECTURE.adoc[] -== Documentation - -include::marlowe/ARCHITECTURE.adoc[] - -include::notes/ARCHITECTURE.adoc[] - -== Specification and design - -We have done a fair amount of work in specifying and formalizing parts of our -system. At the moment all of this work also lives in the Plutus repository, and -we even have some basic testing of the Haskell implementation against the Agda formalization. - -include::plutus-metatheory/ARCHITECTURE.adoc[] - -include::papers/ARCHITECTURE.adoc[] - -include::plutus-core-spec/ARCHITECTURE.adoc[] - -include::extended-utxo-spec/ARCHITECTURE.adoc[] - == Build tooling include::nix/ARCHITECTURE.adoc[] diff --git a/CONTRIBUTING.adoc b/CONTRIBUTING.adoc index 875df9f9d6..84d67dfa09 100644 --- a/CONTRIBUTING.adoc +++ b/CONTRIBUTING.adoc @@ -13,7 +13,6 @@ This includes a variety of useful tools: * The right version of GHC with all the external Haskell dependencies in its package database. * `cabal-install` -* Agda with the right libraries set up to build our Agda code * `stylish-haskell` * `haskell-language-server` * `purescript-language-server` @@ -305,7 +304,7 @@ Use the Github "draft" feature for this. Until a PR is merged, the branch is yours to do with as you will. In particular, rebasing and force-pushing is fine. -Indeed, if you need to update your branch with changes from master, rebasing is typically better than merging. +Indeed, if you need to update your branch with changes from main, rebasing is typically better than merging. So please do use this ability where it helps, for example: @@ -371,9 +370,9 @@ Pull requests cannot be merged without at least the Hydra CI check being green. NOTE: This isn't strictly true: repository admins (notably Michael) can force-merge PRs without the checks being green If you really need this, ask. -CI checks are run on the tip of the PR branch, not on the merge commit that is created with master. 
+CI checks are run on the tip of the PR branch, not on the merge commit that is created with main. As a result, it's possible to create a "semantic" merge commit where the CI passes on commits C1 and C2, but not on the merge of C1 and C2. -In this circumstance we can end up with the CI checks being broken on master. +In this circumstance we can end up with the CI checks being broken on main. However, this is sufficiently infrequent that we just live with the possibility, since eliminating it is quite awkward. ==== Hydra @@ -404,55 +403,3 @@ These perform some of the same checks as Hydra, but Github Actions is often more ==== Buildkite Buildkite currently only performs the continuous deployment steps. - -=== Continuous deployment - -Several packages are continuously deployed to a staging environment. See link:./deployment/README.md[deployment/README.md] for detailed information on the deployment setup. - -== Integrating with other Cardano projects - -=== The Cardano ledger (`cardano-ledger-specs`) - -We have a cyclic dependency with `cardano-ledger-specs`: -1. `cardano-ledger-alonzo` depends on `plutus-ledger-api`. -2. `plutus-ledger`/`plutus-contract` etc. depend on `cardano-api`, which depends on `cardano-node`, which depends on `cardo-ledger-alonzo`. - -This is possible because the cycle is not between *packages*, merely between the respositories. -Still, it causes some problems: if we make a change to `plutus-ledger-api` or below that causes `cardano-ledger-alonzo` to not compile, we cannot immediately merge that change! -To do that, we would need to change our `source-repository-package` pin to point to a version of `cardano-ledger-specs` that works with that change. -But we can't easily make such a PR to `cardano-ledger-specs` either: that would require them to pin a version of `plutus` where everything worked. - -To make this less painful, we have a (slightly complicated) workflow for such changes. 
- -==== Staging branch - -Firstly, there is a branch `plutus-staging` in `cardano-ledger-specs`. -The rules for `plutus-staging` are: -1. It should pass the `cardano-ledger-specs` CI, so it is always mergeable. -This means that you *will* need to update the `plutus` pin when you make changes to `plutus-staging`. -2. It is allowed for the `plutus` pin in `plutus-staging` to point to a _non-master_ commit, such as a commit in an unmerged PR. -3. The commit pointed to by the `plutus` pin in `plutus-staging` must continue to exist: we should not squash or rebase it away. -3. When merging `plutus-staging` into `master`, the `plutus` pin should be updated to point to a _master_ commit. - -==== PR workflow - -1. Create a `plutus` PR (#1) with your changes. -2. Create a `cardano-ledger-specs` PR (#2) targeting `plutus-staging` with the necessary changes in `cardano-ledger-specs`. -The `plutus` pin should point to a commit on the PR #1 branch. -While you are working, you can point the `cardano-ledger-specs` pin in your PR #1 branch to the tip of PR #2, so that everything compiles. -3. Once the changes to `cardano-ledger-specs` have settled down, we can merge PR #2. -After this be careful not to remove the commit from PR #1 which you used in `plutus-staging`! -4. Update the `cardano-ledger-specs` pin in PR #1 to point to `plutus-staging`. -5. Once the changes overall settle down, we can merge PR #1. - -==== Merging the staging branches - -We can do several PRs that touch `cardano-ledger-specs` before we merge `plutus-staging` into master, but it's best not to let it diverge too much. -The process is: - -1. Open a PR in `cardano-ledger-specs` merging `plutus-staging` into master, and updating the `plutus` pin to point to master. -2. You may need to rerun the `plutus-preprocessor` in `cardano-ledger-specs` before doing this. -It generates some test scripts, and isn't done automatically. -3. Open a PR in `plutus` updating the `cardano-ledger-specs` pin to point to master. 
-This can happen later. - diff --git a/README.adoc b/README.adoc index 0d07593c0d..9add40558f 100644 --- a/README.adoc +++ b/README.adoc @@ -1,31 +1,24 @@ -= https://github.com/input-output-hk/plutus[The Plutus Platform and Marlowe] += https://github.com/input-output-hk/plutus-apps[The Plutus Application Framework] :email: plutus@iohk.io :author: Input Output HK Limited :toc: left :reproducible: -The Plutus Platform is an application development platform for developing distributed applications using the Cardano blockchain; and Marlowe is a platform specifically for financial products, built on top of Plutus. +The Plutus Application Framework, part of the Plutus Platform, is a framework for developing distributed applications using the Cardano blockchain. For more information about the projects, see the <>. This repository contains: * Plutus Platform -** The implementation, specification, and mechanized metatheory of Plutus Core, the scripting language embedded in the Cardano ledger. -** Plutus Tx, the compiler from Haskell to Plutus Core. ** Libraries which implement the Plutus Application Framework, a framework for writing applications that work with Cardano. ** A selection of end-to-end usecases written with the Plutus Application Framework ** The Plutus Playground, a web-based playground for learning and writing basic Plutus Applications. -* Marlowe -** The implementation of the Marlowe domain-specific language. -** Tools for working with Marlowe, including static analysis. -** A selection of examples using Marlowe, including a number based on the ACTUS financial standard. -** The Marlowe Playground, a web-based playground for learning and writing Marlowe Applications. [IMPORTANT] ==== -The rest of this README is focussed on people who want to develop or contribute to the Platform. +The rest of this README is focussed on people who want to develop or contribute to the Framework. -For people who want to *use* the Platform, please consult the <>. 
+For people who want to *use* the Framework, please consult the <>. ==== [[cache-warning]] @@ -52,22 +45,12 @@ The main documentation is located https://plutus.readthedocs.io/en/latest/[here] === Specifications and design - https://hydra.iohk.io/job/Cardano/plutus/linux.docs.plutus-report/latest/download-by-type/doc-pdf/plutus[Plutus Technical Report] (draft) -- https://hydra.iohk.io/job/Cardano/plutus/linux.docs.plutus-core-spec/latest/download-by-type/doc-pdf/plutus-core-specification[Plutus Core Specification] -- https://hydra.iohk.io/job/Cardano/plutus/linux.docs.extended-utxo-spec/latest/download-by-type/doc-pdf/extended-utxo-specification[Extended UTXO Model] - -=== Academic papers - -- https://hydra.iohk.io/job/Cardano/plutus/linux.docs.papers.unraveling-recursion/latest/download-by-type/doc-pdf/unraveling-recursion[Unraveling Recursion] (https://doi.org/10.1007/978-3-030-33636-3_15[published version]) -- https://hydra.iohk.io/job/Cardano/plutus/linux.docs.papers.system-f-in-agda/latest/download-by-type/doc-pdf/paper[System F in Agda] (https://doi.org/10.1007/978-3-030-33636-3_10[published version]) -- https://hydra.iohk.io/job/Cardano/plutus/linux.docs.papers.eutxo/latest/download-by-type/doc-pdf/eutxo[The Extended UTXO Model] (in press) -- https://hydra.iohk.io/job/Cardano/plutus/linux.docs.papers.utxoma/latest/download-by-type/doc-pdf/utxoma[UTXOma: UTXO with Multi-Asset Support] (in press) -- https://hydra.iohk.io/job/Cardano/plutus/linux.docs.papers.eutxoma/latest/download-by-type/doc-pdf/eutxoma[Native Custom Tokens in the Extended UTXO Model] (in press) == Working with the project === How to submit an issue -Issues can be filed in the https://github.com/input-output-hk/plutus/issues[GitHub Issue tracker]. +Issues can be filed in the https://github.com/input-output-hk/plutus-apps/issues[GitHub Issue tracker]. However, note that this is pre-release software, so we will not usually be providing support. 
@@ -81,7 +64,7 @@ See link:CONTRIBUTING{outfilesuffix}[CONTRIBUTING], which describes our processe None of our libraries are on Hackage, unfortunately (many of our dependencies aren't either). So for the time being, you need to: -. Add `plutus` as a `source-repository-package` to your `cabal.project`. +. Add `plutus-apps` as a `source-repository-package` to your `cabal.project`. . Copy the `source-repository-package` stanzas from our `cabal.project` to yours. . Copy additional stanzas from our `cabal.project` as you need, e.g. you may need some of the `allow-newer` stanzas. @@ -114,12 +97,11 @@ If you use Nix, these tools are provided for you via `shell.nix`, and you do *no * If you want to build our Haskell packages with https://www.haskell.org/cabal/[`cabal`], then install it. * If you want to build our Haskell packages with https://haskellstack.org/[`stack`], then install it. -* If you want to build our Agda code, then install https://github.com/agda/agda[Agda] and the https://github.com/agda/agda-stdlib[standard library]. [[building-with-nix]] ==== How to build the Haskell packages and other artifacts with Nix -Run `nix build -f default.nix plutus.haskell.packages.plutus-core.components.library` from the root to build the Plutus Core library. +Run `nix build -f default.nix plutus-apps.haskell.packages.plutus-pab.components.library` from the root to build the Plutus PAB library. See <> to find out what other attributes you can build. @@ -134,7 +116,7 @@ The best way is to do this is inside a `nix-shell`. For fresh development setups, you also need to run `cabal update`. ==== -Run `cabal build plutus-core` from the root to build the Plutus Core library. +Run `cabal build plutus-pab` from the root to build the Plutus PAB library. See the link:./cabal.project[cabal project file] to see the other packages that you can build with `cabal`. @@ -196,14 +178,12 @@ These can be built using `nix build`. 
For example: ---- -nix build -f default.nix docs.papers.eutxo +nix build -f default.nix docs.site ---- .Example attributes -* Project packages: defined inside `plutus.haskell.packages` -** e.g. `plutus.haskell.packages.plutus-core.components.library` -* Documents: defined inside `docs` -** e.g. `docs.plutus-core-spec` +* Project packages: defined inside `plutus-apps.haskell.packages` +** e.g. `plutus-apps.haskell.packages.plutus-pab.components.library` There are other attributes defined in link:./default.nix[`default.nix`]. diff --git a/bitte/default.nix b/bitte/default.nix index 1a6bd729e1..cd6f810752 100644 --- a/bitte/default.nix +++ b/bitte/default.nix @@ -1,13 +1,9 @@ -{ marlowe-playground, plutus-playground, web-ghc, marlowe-pab, marlowe-dashboard, marlowe-web, docs, pkgs }: +{ plutus-playground, web-ghc, docs, pkgs }: let staticSite = pkgs.callPackage ./static-site.nix { }; playgroundStatic = pkgs.callPackage ./playground-static.nix { inherit staticSite; docs = docs.site; }; in { - web-ghc-server-entrypoint = pkgs.callPackage ./web-ghc-server.nix { - web-ghc-server = web-ghc; - }; - plutus-playground-server-entrypoint = pkgs.callPackage ./plutus-playground-server.nix { variant = "plutus"; pkg = plutus-playground.server; @@ -16,22 +12,4 @@ in client = plutus-playground.client; variant = "plutus"; }; - - marlowe-playground-server-entrypoint = pkgs.callPackage ./plutus-playground-server.nix { - variant = "marlowe"; - pkg = marlowe-playground.server; - }; - marlowe-playground-client-entrypoint = playgroundStatic { - client = marlowe-playground.client; - variant = "marlowe"; - }; - - marlowe-run-entrypoint = pkgs.callPackage ./pab.nix { - pabExe = "${marlowe-pab}/bin/marlowe-pab"; - staticPkg = marlowe-dashboard.client; - }; - - marlowe-website-entrypoint = staticSite { - root = marlowe-web; - }; } diff --git a/bitte/pab.nix b/bitte/pab.nix deleted file mode 100644 index 102295e9c4..0000000000 --- a/bitte/pab.nix +++ /dev/null @@ -1,110 +0,0 @@ -{ 
writeShellScriptBin, writeText, pabExe, staticPkg, cacert, coreutils, lib, gnused, utillinux }: -let - dbFile = "/var/lib/pab/pab-core.db"; - - # /var/lib isn't right but whatever - pabYaml = "/var/lib/pab/pab.yaml"; - - slotZeroTime = 1596059091000; # POSIX time of slot zeron is milliseconds. See note [Datetime to slot] in Marlowe.Slot - slotLengthMillis = 1000; - - constantFee = 10; # Constant fee per transaction in lovelace - scriptsFeeFactor = 0.0; # Factor by which to multiply the size-dependent scripts fee in lovelace - - pabYamlIn = writeText "pab.yaml.in" (builtins.toJSON { - dbConfig = { - dbConfigFile = dbFile; - dbConfigPoolSize = 20; - }; - - pabWebserverConfig = { - baseUrl = "http://localhost:@WEBSERVER_PORT@"; - staticDir = "${staticPkg}"; - permissiveCorsPolicy = false; - }; - - walletServerConfig = { - baseUrl = "http://localhost:@WALLET_PORT@"; - wallet = { - getWallet = 1; - }; - }; - - nodeServerConfig = { - mscBaseUrl = "http://localhost:@NODE_PORT@"; - mscSocketPath = "/tmp/node-server.sock"; - mscRandomTxInterval = 20000000; - mscSlotConfig = { - scSlotZeroTime = slotZeroTime; - scSlotLength = slotLengthMillis; - }; - mscFeeConfig = { - fcConstantFee = { - getLovelace = constantFee; - }; - fcScriptsFeeFactor = scriptsFeeFactor; - }; - mscNetworkId = ""; # Empty string for Mainnet. Put a network magic number in the string to use the Testnet. - mscKeptBlocks = 100000; - mscInitialTxWallets = [ - { getWallet = 1; } - { getWallet = 2; } - { getWallet = 3; } - ]; - mscNodeMode = "MockNode"; - }; - - chainIndexConfig = { - ciBaseUrl = "http://localhost:@CHAIN_INDEX_PORT@"; - ciWatchedAddresses = [ ]; - }; - - requestProcessingConfig = { - requestProcessingInterval = 1; - }; - - signingProcessConfig = { - spBaseUrl = "http://localhost:@SIGNING_PROCESS_PORT@"; - spWallet = { - getWallet = "1"; - }; - }; - }); - - # Note: The db is dropped as a workaround for a problem with - # eventful which crashes PAB. 
Currently data persistence is not - # relevant, but the problem *will* occur again when the DB removal - # is removed unless the underlying problem is identified/fixed. - pab-init-cmd = writeShellScriptBin "pab-init-cmd" '' - set -eEuo pipefail - - echo "[pab-init-cmd]: Dropping PAB database file '${dbFile}'" >&2 - rm -rf ${dbFile} - - echo "[pab-init-cmd]: Creating new DB '${dbFile}'" >&2 - ${pabExe} --config=${pabYaml} migrate; - ''; -in -writeShellScriptBin "entrypoint" '' - set -eEuo pipefail - - export PATH=${lib.makeBinPath [ coreutils gnused utillinux ]} - - export SYSTEM_CERTIFICATE_PATH=${cacert}/etc/ssl/certs/ca-bundle.crt - - sed -e "s|@WEBSERVER_PORT@|$((PORT_RANGE_BASE))|g" \ - -e "s|@NODE_PORT@|$((PORT_RANGE_BASE + 1))|g" \ - -e "s|@CHAIN_INDEX_PORT@|$((PORT_RANGE_BASE + 2))|g" \ - -e "s|@SIGNING_PROCESS_PORT@|$((PORT_RANGE_BASE + 3))|g" \ - -e "s|@WALLET_PORT@|$((PORT_RANGE_BASE + 4))|g" \ - ${pabYamlIn} > ${pabYaml} - - - ${pab-init-cmd}/bin/pab-init-cmd - - # Ugly ugly hack to kill the PAB at midnight UTC - ${pabExe} --config=${pabYaml} all-servers& - sleep $(($(date -f - +%s- <<< $'tomorrow 00:00\nnow')0))& - wait -n - exit 1 -'' diff --git a/bitte/web-ghc-server.nix b/bitte/web-ghc-server.nix deleted file mode 100644 index fc7f9a44eb..0000000000 --- a/bitte/web-ghc-server.nix +++ /dev/null @@ -1,11 +0,0 @@ -{ writeShellScriptBin, web-ghc-server, symlinkJoin }: - -let - entrypoint = writeShellScriptBin "entrypoint" '' - ${web-ghc-server}/bin/web-ghc-server webserver -p $PORT --bind 0.0.0.0 - ''; -in -symlinkJoin { - name = "entrypoint"; - paths = [ entrypoint ]; -} diff --git a/cabal.project b/cabal.project index 2b91c54179..bd1a8acfc4 100644 --- a/cabal.project +++ b/cabal.project @@ -2,32 +2,16 @@ index-state: 2021-08-14T00:00:00Z packages: doc - fake-pab freer-extras - marlowe - marlowe-actus - marlowe-playground-server - marlowe-dashboard-server - marlowe-symbolic playground-common - plutus-benchmark plutus-chain-index plutus-contract - 
plutus-core - plutus-errors plutus-ledger - plutus-ledger-api - plutus-metatheory plutus-pab plutus-playground-server - plutus-tx - plutus-tx-plugin plutus-use-cases - prettyprinter-configurable quickcheck-dynamic web-ghc - word-array - stubs/plutus-ghc-stub -- We never, ever, want this. write-ghc-environment-files: never @@ -40,7 +24,7 @@ benchmarks: true test-show-details: streaming allow-newer: - -- Pins to an old version of Template Haskell, unclear if/when it will be updated + -- Copied from plutus-core size-based:template-haskell , ouroboros-consensus-byron:formatting , beam-core:aeson @@ -48,6 +32,7 @@ allow-newer: , beam-sqlite:dlist , beam-migrate:aeson +-- Copied from plutus-core constraints: -- big breaking change here, inline-r doens't have an upper bound singletons < 3.0 @@ -56,10 +41,6 @@ constraints: -- constraint from dependent-sum-template (which is the library we actually use). , dependent-sum > 0.6.2.0 --- See the note on nix/pkgs/default.nix:agdaPackages for why this is here. --- (NOTE this will change to ieee754 in newer versions of nixpkgs). -extra-packages: ieee, filemanip - -- These packages appear in our dependency tree and are very slow to build. -- Empirically, turning off optimization shaves off ~50% build time. -- It also mildly improves recompilation avoidance. @@ -73,8 +54,7 @@ package ouroboros-consensus-cardano package cardano-api optimization: False --- https://github.com/Quid2/flat/pull/22 fixes a potential exception --- when decoding invalid (e.g. malicious) text literals. 
+-- Copied from plutus-core source-repository-package type: git location: https://github.com/Quid2/flat.git @@ -91,11 +71,13 @@ source-repository-package location: https://github.com/input-output-hk/servant-purescript.git tag: a0c7c7e37c95564061247461aef4be505a853538 +-- Copied from plutus-core source-repository-package type: git location: https://github.com/input-output-hk/cardano-crypto.git tag: 07397f0e50da97eaa0575d93bee7ac4b2b2576ec +-- Copied from plutus-core source-repository-package type: git location: https://github.com/input-output-hk/cardano-base @@ -112,6 +94,7 @@ source-repository-package slotting strict-containers +-- Copied from plutus-core source-repository-package type: git location: https://github.com/input-output-hk/cardano-prelude @@ -203,7 +186,7 @@ source-repository-package cardano-ledger-core alonzo/impl --- A lot of plutus dependencies have to be synchronized with the dependencies of +-- A lot of plutus-apps dependencies have to be synchronized with the dependencies of -- cardano-node. If you update cardano-node, please make sure that all dependencies -- of cardano-node are also updated. source-repository-package @@ -228,10 +211,21 @@ source-repository-package source-repository-package type: git - location: https://github.com/input-output-hk/hedgehog-extras - tag: edf6945007177a638fbeb8802397f3a6f4e47c14 + location: https://github.com/input-output-hk/goblins + tag: cde90a2b27f79187ca8310b6549331e59595e7ba +-- A lot of plutus-apps dependencies have to be syncronized with the dependencies of +-- plutus. 
If you update plutus, please make sure that all dependencies of plutus +-- are also updated source-repository-package type: git - location: https://github.com/input-output-hk/goblins - tag: cde90a2b27f79187ca8310b6549331e59595e7ba + location: https://github.com/input-output-hk/plutus + tag: 0cb5c00add3809d9f247e9ec3f069d9ac3becd95 + subdir: + plutus-core + plutus-ledger-api + plutus-tx + plutus-tx-plugin + word-array + prettyprinter-configurable + stubs/plutus-ghc-stub diff --git a/ci.nix b/ci.nix index f66442a674..0b413865b7 100644 --- a/ci.nix +++ b/ci.nix @@ -5,9 +5,11 @@ , rootsOnly ? false # We explicitly pass true here in the GitHub action but don't want to slow down hydra , checkMaterialization ? false +, sourcesOverride ? { } +, sources ? import ./nix/sources.nix { system = builtins.currentSystem; } // sourcesOverride }: let - inherit (import ./nix/lib/ci.nix) dimension platformFilterGeneric filterAttrsOnlyRecursive filterSystems; + inherit (import (sources.plutus-core + "/nix/lib/ci.nix")) dimension platformFilterGeneric filterAttrsOnlyRecursive filterSystems; # limit supportedSystems to what the CI can actually build # currently that is linux and darwin. systems = filterSystems supportedSystems; @@ -56,7 +58,7 @@ let let packages = import ./default.nix { inherit system crossSystem checkMaterialization; }; pkgs = packages.pkgs; - plutus = packages.plutus; + plutus-apps = packages.plutus-apps; # Map `crossSystem.config` to a name used in `lib.platforms` platformString = if crossSystem == null then system @@ -74,25 +76,25 @@ let filterAttrsOnlyRecursive (_: drv: isBuildable drv) ({ # The haskell.nix IFD roots for the Haskell project. 
We include these so they won't be GCd and will be in the # cache for users - inherit (plutus.haskell.project) roots; + inherit (plutus-apps.haskell.project) roots; } // pkgs.lib.optionalAttrs (!rootsOnly) (filterCross { # build relevant top level attributes from default.nix - inherit (packages) docs tests plutus-playground marlowe-playground marlowe-dashboard marlowe-dashboard-fake-pab plutus-pab plutus-use-cases deployment; + inherit (packages) docs tests plutus-playground plutus-pab plutus-use-cases; # Build the shell expression to be sure it works on all platforms # # The shell should never depend on any of our Haskell packages, which can # sometimes happen by accident. In practice, everything depends transitively - # on 'plutus-core', so this does the job. + # on 'plutus-ledger', so this does the job. # FIXME: this should simply be set on the main shell derivation, but this breaks # lorri: https://github.com/target/lorri/issues/489. In the mean time, we set it # only on the CI version, so that we still catch it, but lorri doesn't see it. shell = (import ./shell.nix { inherit packages; }).overrideAttrs (attrs: attrs // { - disallowedRequisites = [ plutus.haskell.packages.plutus-core.components.library ]; + disallowedRequisites = [ plutus-apps.haskell.packages.plutus-ledger.components.library ]; }); # build all haskell packages and tests - haskell = pkgs.recurseIntoAttrs (mkHaskellDimension pkgs plutus.haskell.projectPackages); + haskell = pkgs.recurseIntoAttrs (mkHaskellDimension pkgs plutus-apps.haskell.projectPackages); })); in dimension "System" systems (name: sys: _select name sys null) diff --git a/default.nix b/default.nix index 2036c9122d..4267e187f4 100644 --- a/default.nix +++ b/default.nix @@ -1,13 +1,13 @@ ######################################################################## -# default.nix -- The top-level nix build file for Plutus. +# default.nix -- The top-level nix build file for Plutus apps. 
# # This file defines various attributes that are used for building and -# developing Plutus. +# developing Plutus apps. # ######################################################################## { system ? builtins.currentSystem , crossSystem ? null -, config ? { allowUnfreePredicate = (import ./nix/lib/unfree.nix).unfreePredicate; } +, config ? { } , sourcesOverride ? { } , sources ? import ./nix/sources.nix { inherit system; } // sourcesOverride , haskellNix ? import sources.haskell-nix { @@ -18,7 +18,6 @@ }; } , packages ? import ./nix { inherit system sources crossSystem config sourcesOverride haskellNix checkMaterialization enableHaskellProfiling; } - # An explicit git rev to use, passed when we are in Hydra # Whether to check that the pinned shas for haskell.nix are correct. We want this to be # false, generally, since it does more work, but we set it to true in the CI , checkMaterialization ? false @@ -26,63 +25,33 @@ , enableHaskellProfiling ? false }: let - inherit (packages) pkgs plutus sources; - inherit (pkgs) lib haskell-nix; - inherit (plutus) haskell agdaPackages; - inherit (plutus) easyPS sphinxcontrib-haddock; + inherit (packages) pkgs plutus-apps; + inherit (plutus-apps) haskell; in rec { - inherit pkgs plutus; + inherit pkgs plutus-apps; - inherit (plutus) web-ghc; + inherit (plutus-apps) web-ghc; inherit (haskell.packages.plutus-pab.components.exes) plutus-pab-examples plutus-uniswap; - inherit (haskell.packages.marlowe.components.exes) marlowe-pab; - - webCommon = pkgs.callPackage ./web-common { inherit (plutus.lib) gitignore-nix; }; - webCommonPlutus = pkgs.callPackage ./web-common-plutus { inherit (plutus.lib) gitignore-nix; }; - webCommonMarlowe = pkgs.callPackage ./web-common-marlowe { inherit (plutus.lib) gitignore-nix; }; - webCommonPlayground = pkgs.callPackage ./web-common-playground { inherit (plutus.lib) gitignore-nix; }; + webCommon = pkgs.callPackage ./web-common { inherit (plutus-apps.lib) gitignore-nix; }; + webCommonPlutus = 
pkgs.callPackage ./web-common-plutus { inherit (plutus-apps.lib) gitignore-nix; }; + webCommonPlayground = pkgs.callPackage ./web-common-playground { inherit (plutus-apps.lib) gitignore-nix; }; plutus-playground = pkgs.recurseIntoAttrs rec { - haddock = plutus.plutus-haddock-combined; + haddock = plutus-apps.plutus-haddock-combined; inherit (pkgs.callPackage ./plutus-playground-client { - inherit (plutus.lib) buildPursPackage buildNodeModules filterNpm gitignore-nix; + inherit (plutus-apps.lib) buildPursPackage buildNodeModules filterNpm gitignore-nix; inherit haskell webCommon webCommonPlutus webCommonPlayground; }) client server generate-purescript start-backend; }; - marlowe-playground = pkgs.recurseIntoAttrs rec { - inherit (pkgs.callPackage ./marlowe-playground-client { - inherit (plutus.lib) buildPursPackage buildNodeModules filterNpm gitignore-nix; - inherit haskell webCommon webCommonMarlowe webCommonPlayground; - }) client server generate-purescript start-backend; - }; - - marlowe-dashboard = pkgs.recurseIntoAttrs rec { - inherit (pkgs.callPackage ./marlowe-dashboard-client { - inherit haskell plutus-pab; - inherit (plutus.lib) buildPursPackage buildNodeModules filterNpm gitignore-nix; - inherit webCommon webCommonMarlowe; - }) client server-setup-invoker marlowe-invoker generated-purescript generate-purescript start-backend; - }; - - marlowe-dashboard-fake-pab = pkgs.recurseIntoAttrs rec { - inherit (pkgs.callPackage ./fake-pab { - inherit marlowe-dashboard; - inherit (plutus.lib) buildPursPackage buildNodeModules filterNpm gitignore-nix; - inherit haskell webCommon webCommonMarlowe; - }) client fake-pab-exe fake-pab-generated-purescript; - }; - - marlowe-web = pkgs.callPackage ./marlowe-website { inherit (plutus.lib) npmlock2nix gitignore-nix; }; - plutus-pab = pkgs.recurseIntoAttrs (pkgs.callPackage ./plutus-pab-client { - inherit (plutus.lib) buildPursPackage buildNodeModules gitignore-nix filterNpm; + inherit (plutus-apps.lib) buildPursPackage 
buildNodeModules gitignore-nix filterNpm; inherit haskell webCommon webCommonPlutus; }); @@ -92,28 +61,19 @@ rec { tests = import ./nix/tests/default.nix { inherit pkgs docs; - inherit (plutus.lib) gitignore-nix; - inherit (plutus) fixStylishHaskell fixPurty fixPngOptimization; - inherit plutus-playground marlowe-playground marlowe-dashboard web-ghc plutus-pab marlowe-pab; + inherit (plutus-apps.lib) gitignore-nix; + inherit (plutus-apps) fixStylishHaskell fixPurty fixPngOptimization; + inherit plutus-playground web-ghc plutus-pab; src = ./.; }; - docs = import ./nix/docs.nix { inherit pkgs plutus; }; - - deployment = pkgs.recurseIntoAttrs (pkgs.callPackage ./deployment/morph { - plutus = { - inherit plutus-pab marlowe-dashboard marlowe-playground plutus-playground web-ghc docs marlowe-web marlowe-pab; - }; - }); + docs = import ./nix/docs.nix { inherit pkgs plutus-apps; }; # This builds a vscode devcontainer that can be used with the plutus-starter project (or probably the plutus project itself). 
- devcontainer = import ./nix/devcontainer/plutus-devcontainer.nix { inherit pkgs plutus; }; - - # Test data needed by marlowe-actus provided via niv - inherit (sources) actus-tests; + devcontainer = import ./nix/devcontainer/plutus-devcontainer.nix { inherit pkgs plutus-apps; }; - build-and-push-devcontainer-script = import ./nix/devcontainer/deploy/default.nix { inherit pkgs plutus; }; + build-and-push-devcontainer-script = import ./nix/devcontainer/deploy/default.nix { inherit pkgs plutus-apps; }; # Packages needed for the bitte deployment - bitte-packages = import ./bitte { inherit marlowe-playground plutus-playground web-ghc marlowe-pab marlowe-dashboard marlowe-web docs pkgs; }; + bitte-packages = import ./bitte { inherit plutus-playground docs pkgs; }; } diff --git a/deployment/ARCHITECTURE.md b/deployment/ARCHITECTURE.md deleted file mode 100644 index 823cb745e1..0000000000 --- a/deployment/ARCHITECTURE.md +++ /dev/null @@ -1,31 +0,0 @@ -# Infrastructure Architecture - -This document is a guide to the architecture we use for the plutus and marlowe infrastructure and is intended to help you gain a deeper understanding of how setup and deployment work. It is not a user guide, if you want detailed information about how to setup and deploy to the infrastructure, you should look at [./README.md](./README.md). - -The infrastructure is split into two parts: -* AWS is used to create a private network infrastructure and unconfigured EC2 instances with a load balancer to present HTTP endpoints to the internet. This is managed using [Terraform](https://www.terraform.io/). -* [Morph](https://github.com/DBCDK/morph) is used to configure the EC2 instances - -## AWS Architecture - -We use a [standard architecture](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario2.html) recommended by AWS. 
Within this architecture we use an [Application Load Balancer](https://aws.amazon.com/blogs/devops/introducing-application-load-balancer-unlocking-and-optimizing-architectures/) to forward different URLs to the relevant services. - -![AWS Architecture](./architecture.svg) - -This is all managed using terraform and there are many resources on the internet that describe how this works e.g. [this example of this architecture in Terraform](https://medium.com/appgambit/terraform-aws-vpc-with-private-public-subnets-with-nat-4094ad2ab331). - -All our terraform code is defined in [./deployment/terraform](./deployment/terraform) - -## Morph - -Morph code is defined in [./deployment/morph](./deployment/morph) and more information about the structure of this can be viewed in [the README](./deployment/morph/README.md). - -## Glue code and scripts - -The terraform code will create EC2 instances that have IP addresses that we can't know ahead of time. Therefore we need to generate a file using terraform that can be read by morph so that it knows where to deploy things. In fact we create 4 files: -* machines.json - this file contains the private ip addresses and dns names of all the EC2 instances as well as some metadata such as the SSH keys that should be given access, the environment name etc. -* secrets.plutus.${env}.json - contains secret values used by the plutus playground that are stored encrypted using [pass](https://www.passwordstore.org/) in [../secrets]() -* secrets.marlowe.${env}.json - contains secret values used by the marlowe playground that are stored encrypted using [pass](https://www.passwordstore.org/) in [../secrets]() -* An ssh config file which should be included into your main ssh config in order to allow morph to use the bastion jump servers easily. 
FYI I have created [this issue](https://github.com/DBCDK/morph/issues/145) to try and avoid the need for doing this although the ssh config is still useful in order to make manual ssh connection to the machine easy, for example for debugging and inspecting logs etc. - -In addition to this, we provide shell scripts that are generated by nix in [./default.nix]](./default.nix) in order to make all required operations easier. It's important to note that we use an environmental variable `PLUTUS_ROOT` to tell terraform where to save these files. For this reason you should always run these scripts within a nix-shell unless you really know what you are doing. diff --git a/deployment/README.md b/deployment/README.md deleted file mode 100644 index 6bdfc88f73..0000000000 --- a/deployment/README.md +++ /dev/null @@ -1,269 +0,0 @@ -# Plutus/Marlowe Deployment - - staging environment deployment - - alpha environment deployment - - production environment deployment - -## Overview - -The deployment uses a combination of [Terraform](https://www.terraform.io/) and [Morph](https://github.com/DBCDK/morph) where Terraform creates the instances and network configuration on AWS and Morph deploys and activates the NixOS configurations. - -- [terraform configuration files](https://github.com/input-output-hk/plutus/tree/master/deployment/terraform) -- [morph deployment files](https://github.com/input-output-hk/plutus/tree/master/deployment/morph) -- [NixOS modules](https://github.com/input-output-hk/plutus/tree/master/nix/modules) - -### Continuous Delivery - -The [server configurations](https://github.com/input-output-hk/plutus/tree/master/deployment/morph/default.nix) that will be deployed via morph are built by hydra and are deployed by buildkite pipelines (see [pipeline.yml](https://github.com/input-output-hk/plutus/blob/master/.buildkite/pipeline.yml)). 
Two pipelines are set up for this purpose: - -- **master**: Successful [master branch builds](https://hydra.iohk.io/jobset/Cardano/plutus) are automatically deployed to the staging environment via the [_master_ pipeline](https://buildkite.com/input-output-hk/plutus/builds?branch=master) -- **production**: Production deployments are triggered by the [_production_ deployment pipeline on buildkite](https://buildkite.com/input-output-hk/plutus/builds?branch=master) whenever the `production` branch changes. - -### Environments - -There are several environments to deploy to. All available environments are listed in [deployment/envs.nix](https://github.com/input-output-hk/plutus/tree/master/deployment/envs.nix): - -```nix -# deployment/envs.nix (excerpt) -{ - alpha = { region = "eu-west-2"; }; - production = { region = "eu-west-1"; }; -} -``` - -Environments are deployed to `.iohkdev.io`. - -#### Deployment Versions -`staging` is the staging environment to which the `master` branch is deployed automatically. The `production` environment is reserved for the live environment. Additional environments are available for testing purposes. - -Ideally the `staging` deployment should always reflect the current state of `master`, and `production` should always reflect the current state of the `production` branch. This can be verified using the `/version` endpoint: - -```shell -$ curl https://staging.plutus.iohkdev.io/version -{"rev": "13342f6981faabdc2bb7e88a9cb5a3990f7a4930"} - -$ curl https://production.plutus.iohkdev.io/version -{"rev": "acc7a4486d50844690fb485b74abab44908bd39b"} -``` - -There is also an `alpha` environment, deployed automatically from the `alpha` branch. - -## Usage - -**NOTE**: Deployments to either the `staging` or the `production` environment **should never** be done manually unless there is a _good reason_ to do so. Instead, the buildkite pipelines should be used for this. 
- -#### Prerequisites -- AWS account -- Active plutus nix-shell environment - -In order to to deploy anything you need to log in to your AWS account: -```shell -$ eval $(aws-mfa-login ) -``` -The deployment scripts will validate the login status and abort if no valid session can be found. - -#### Commands - -The following deployment commands are made available through a nix-shell: - -- **provision-infra** provisions the infrastructure on AWS using terraform. -- **destroy-infra** deletes previously provisioned infrastructure on AWS using terraform. -- **deploy-nix** deploys nixos configurations to Terraform provisioned servers using morph. -- **deploy** runs both, `provision-infra` followed by `deploy-nix`. - -##### provision-infra - -```shell -$ nix-shell -A --run "provision-infra" -``` -- The `provision-infra` command executes `terraform apply` updating AWS to be in sync with the current state of configuration. -- Running `provision-infra` may destroy and/or create several, all, or no resources at all. Execution times will differ respectively. - -##### destroy-infra - -```shell -$ nix-shell -A --run "destroy-infra" -``` -- The `destroy-infra` command executes `terraform destroy` and destroys all resources previously created by terraform. - -##### deploy-nix - -```shell -$ nix-shell -A [--argstr rev ] --run "deploy-nix" -``` -- The deploy-nix command executes `morph deploy` to copy and activate the most recent nix packages -- If the environment infrastructure is not up to date (meaning `terraform apply` would not be a no-op) the deployment will abort. - -##### deploy - -```shell -$ nix-shell -A [--argstr rev ] --run "deploy" -``` -- The deploy command combines `provision-infra` and `deploy-nix`: It performs `terraform apply` followed by `morph deploy`. -- The `rev` argument is optional and defaults to `dev` when not specified. The value of `rev` is returned by the `/version` endpoint as explained above. 
- - -## Secrets - -The deployment depends on several credentials which are maintained using the [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/). The secrets are organized per environment with the following structure: - -```json -{ - "env": "", - "marlowe": { - "githubClientId": "", - "githubClientSecret": "", - "jwtSignature": "" - }, - "plutus": { - "githubClientId": "", - "githubClientSecret": "", - "jwtSignature": "" - } -} -``` - -The deployment scripts will obtain this json document for the respective environment and expose them to terraform through several -environment variables. - - -## Maintenance - -The sections below describe actions relevant for advanced usage or maintenance of the deployment process. - -### Adding Users -Adding new users that are able to perform deployments requires 2 individual steps: - -1. Creating an AWS account for the new user -2. Adding the user's ssh key - -The AWS login is required in order to provision infrastructure using Terraform. The ssh key has to be added in order to enable users to perform deployments with morph through the ssh jump host. - -#### AWS -The new user has to be added to the appropriate AWS organization. Please talk to a plutus/marlowe team member and request access. New -users with appropriate permissions have to be added manually through the aws console. - -#### Terraform -In order to perform nix deployments a ssh-key has to be configured in [deployment/terraform/locals.tf](https://github.com/input-output-hk/plutus/blob/master/deployment/terraform/locals.tf): - -**1.** **Create a new ssh keypair**: - -``` -$ ssh-keygen -t ed25519 -``` - -**2.** **Add the user/key to the `ssh_keys` map in** [deployment/terraform/locals.tf](https://github.com/input-output-hk/plutus/blob/master/deployment/terraform/locals.tf) - -``` - -ssh_keys = { - username = "ssh-ed25519 AAAAC...f3JfmL3A2 usernamer@host -} -``` - -**3**. 
**Add the new user to environments that they should be able to deploy to** - -In order to allow the user (ssh key) to deploy to the `testing` environment [deployment/terraform/locals.tf](https://github.com/input-output-hk/plutus/blob/master/deployment/terraform/locals.tf) needs to be edited as shown below: - -``` - bastion_ssh_keys_ks = { - testing = ["username"] - ... - } - - root_ssh_keys_ks = { - testing = ["username"] - .. - } -``` - -### Adding Environments - -Deployments can be performed to different environments. Each environment is a full aws setup with multiple ec2 instances and networking, deployed to different `iohkdev.io` subdomains: - -- The `alpha` environment is deployed to `alpha.iohkdev.io` -- `testing` is deployed to `testing.iohkdev.io` - -Terraform uses different workspaces for each environment which are also separated in the shared state which is stored in a S3 bucket. When entering a nix-shell the respective terraform workspace is chosen automatically. - -In order to add a new environment the following steps need to be followed: - -**1. Add the environment to [deployment/envs.nix](https://github.com/input-output-hk/plutus/blob/master/deployment/envs.nix)** - -In order to add an environment `environment` it needs to be added to the attribute set in `deployment/envs.nix` as follows: - -```nix -{ - environment = { region = "eu-west-3"; }; -} -``` - -**2. Add users that can deploy to the environment**: - -Make sure that users that should be able to deploy to the new environment are added to it in [deployment/terraform/locals.tf](https://github.com/input-output-hk/plutus/blob/master/deployment/terraform/locals.tf) as described above in section about adding users. - - -**3. Configure credentials for the environment**: - -In order for the deployment to work it requires access to the secrets described above in the **Secrets** section. 
The secrets are obtained from the _AWS Secrets Manager_ but they need to be imported first for every environment: - -First create a json file containing the necessary credentials in the `deployment` directory: -```json -{ - "env": "", - "marlowe": { - "githubClientId": "", - "githubClientSecret": "", - "jwtSignature": "" - }, - "plutus": { - "githubClientId": "", - "githubClientSecret": "", - "jwtSignature": "" - } -} -``` -Then use `aws-upload-secrets` to submit it: - -```shell -$ nix-shell aws-upload-secrets.nix --argstr env --run "aws-upload-secrets ./file.json" -``` - -You should now be able to acess the nix-shell for the newly created environment in which the credentials you just uploaded should be set in several environment variables: - -```shell -$ nix-shell -A -$ echo $TF_VAR_plutus_github_client_id # should print the value you just configured -``` - - -### Extending The Deployment - -The deployment process is split between provisioning the infrastructure on AWS using Terraform and deploying NixOS configurations with updated packages or service descriptions using morph. Depending on the respective changes, either one or both of these layers have to be updated. - -#### Adding Servers -The currently configured ec2 instances are easy to discover: - -- morph: [deployment/morph/machines.nix](https://github.com/input-output-hk/plutus/blob/master/deployment/morph/machines.nix) -- Terraform: [deployment/terraform/machines.tf](https://github.com/input-output-hk/plutus/blob/master/deployment/terraform/machines.tf) - -The terraform file represents a local resource which is consumed by the morph configuration to obtain information that only terraform can provide. Both files represent the respective entry point to configuring a new server. On the Terraform side the ec2 instance hardware, network and SSL certificates have to be configured. On the morph side there has to be a NixOS configuration describing the software setup. 
- - -#### Adding Services -Assuming you only want to add a service to an existing server, you can follow these steps: - -- Expose relevant packages in [default.nix](https://github.com/input-output-hk/plutus/blob/master/default.nix) -- Create a NixOS module describing your service in [nix/modules](https://github.com/input-output-hk/plutus/tree/master/nix/modules) - - -#### Configuring A New Domain -Configuring a newly purchased domain for use with a deployment environment requires several changes, most of them to the Terraform code: -1. **Hosted Zone Configuration**: Add a new hosted zone in the route53 configuration on the aws console -1. **Update NS Entries**: Configure the new domain (externally) to use the name servers of the hosted zone -1. **ALB Configuration**: Configure the routing for the new domain in [loadbalancing.tf](https://github.com/input-output-hk/plutus/blob/master/deployment/terraform/loadbalancing.tf) -1. **Configure Certificates**: Configure certificates for the new domain in [certificates.tf](https://github.com/input-output-hk/plutus/blob/master/deployment/terraform/certificates.tf) - -The changes in [this PR](https://github.com/input-output-hk/plutus/pull/3107) can be used as reference. 
diff --git a/deployment/architecture.svg b/deployment/architecture.svg deleted file mode 100644 index a0fbd3b735..0000000000 --- a/deployment/architecture.svg +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - private subnet 1playgroundsmarlowe staticplutus staticmarlowe apiplutus apinginxweb_ghcweb-ghcprivate subnet 2Availability Zone BAvailability Zone AApplication Load Balancermarlowe-dash.iohkdev.io/marlowe.iohkdev.io/marlowe.iohkdev.io/runghcmarlowe.iohkdev.io/apigoguen.monitoring.iohkdev.ioplutus.iohkdev.io/plutus.iohkdev.io/runghcplutus.iohkdev.io/apibastion_b(ssh)bastion_a(ssh)public subnet 2public subnet 2VPCplaygroundsmarlowe staticplutus staticmarlowe apiplutus apinginxmonitoringprometheusweb_ghcweb-ghcmarlowe_dashmarlowe dashPAB \ No newline at end of file diff --git a/deployment/aws-upload-secrets.nix b/deployment/aws-upload-secrets.nix deleted file mode 100644 index 674e0afa2a..0000000000 --- a/deployment/aws-upload-secrets.nix +++ /dev/null @@ -1,32 +0,0 @@ -{ pkgs ? (import ./.. 
{ }).pkgs -, env -}: -let - inherit (pkgs) mkShell writeShellScriptBin pass awscli; - aws-upload-secrets = env: - let - region = (import ./envs.nix)."${env}".region; - in - writeShellScriptBin "aws-upload-secrets" '' - set -eo pipefail - - if [ $# -ne 1 ]; then - echo "[aws-upload-secrets]: Error, Please specify a json file as input" - exit 1 - fi - - SECRETS_FILE="$1" - - echo "[aws-upload-secrets]: Validating input file '$SECRETS_FILE'" - cat $SECRETS_FILE | jq empty - echo "[aws-upload-secrets]: Uploading secrets for '${env}' in region '${region}'" - - ${awscli}/bin/aws secretsmanager create-secret --name "env/${env}" \ - --description "env/${env}" \ - --secret-string file://$SECRETS_FILE \ - --region ${region} - ''; -in -mkShell { - buildInputs = [ (aws-upload-secrets env) ]; -} diff --git a/deployment/envs.nix b/deployment/envs.nix deleted file mode 100644 index 94a6b5cc70..0000000000 --- a/deployment/envs.nix +++ /dev/null @@ -1,10 +0,0 @@ -{ - alpha = { region = "eu-west-2"; }; - production = { region = "eu-west-1"; }; - pablo = { region = "eu-west-3"; }; - testing = { region = "eu-west-3"; }; - hernan = { region = "us-west-2"; }; - amyas = { region = "eu-west-2"; }; - staging = { region = "eu-west-3"; }; - bitte_match = { region = "eu-central-1"; }; -} diff --git a/deployment/morph/.gitignore b/deployment/morph/.gitignore deleted file mode 100644 index 4dd714b890..0000000000 --- a/deployment/morph/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -machines.json -secrets.* \ No newline at end of file diff --git a/deployment/morph/README.md b/deployment/morph/README.md deleted file mode 100644 index 0910146719..0000000000 --- a/deployment/morph/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Morph Deployments - -Morph can be used from any computer with ssh access to the EC2 machines to deploy NixOS configuration. - -There is a slight complexity that I haven't yet solved. 
In order to deploy the machines you need to know their addresses, this information is stored in machines.json which is produced in a temporary directory every time you run the terraform deployment. You need to copy that file into this directory before running `morph deploy ./default.nix switch`. \ No newline at end of file diff --git a/deployment/morph/default.nix b/deployment/morph/default.nix deleted file mode 100644 index 1a1579085e..0000000000 --- a/deployment/morph/default.nix +++ /dev/null @@ -1,38 +0,0 @@ -{ pkgs, plutus }: -let - # Dummy definition of what is usually read from - # the terraform local resource `machines.json`. - # The attributes in below are read in `machines.nix` - tfinfo = { - rootSshKeys = [ ]; - rev = "dev"; - marloweDashA.dns = "marlowe-dash-a"; - marloweDashB.dns = "marlowe-dash-b"; - playgroundsA.dns = "playgrounds-a"; - playgroundsB.dns = "playgrounds-b"; - webghcA.dns = "webghc-a"; - webghcB.dns = "webghc-b"; - environment = "alpha"; - plutusTld = "plutus.iohkdev.io"; - marloweTld = "marlowe.iohkdev.io"; - }; - - # Fake `deployment` option definition so `pkgs.nixos` does not - # fail building the machines when it encounters the `deployment`. - fakeDeploymentOption = { lib, config, ... }: { - options.deployment = lib.mkOption { - type = lib.types.attrs; - description = "fake"; - }; - }; - - # Get a `buildMachine` function that wraps a `mkMachine` call with the fake deployment option - # in a `pkgs.nixos` call to build the machine outside of morph. 
- mkMachine = pkgs.callPackage ./mk-machine.nix { inherit plutus tfinfo; extraImports = [ fakeDeploymentOption ]; }; - buildMachine = { config, name }: (pkgs.nixos (mkMachine { inherit config name; })).toplevel; - linuxOnly = x: if pkgs.stdenv.isLinux then x else { }; -in -linuxOnly (import ./machines.nix { - inherit pkgs tfinfo; - mkMachine = buildMachine; -}) diff --git a/deployment/morph/machines.nix b/deployment/morph/machines.nix deleted file mode 100644 index 0b1c03a56e..0000000000 --- a/deployment/morph/machines.nix +++ /dev/null @@ -1,24 +0,0 @@ -{ pkgs, mkMachine, tfinfo }: -{ - # The network attribute allows to supply - # some settings to all deployments - network = { - description = "plutus network"; - inherit pkgs; - }; - - "${tfinfo.marloweDashA.dns}" = mkMachine { - name = "marloweDashA"; - config = ./machines/marlowe-dash.nix; - }; - - "${tfinfo.playgroundsA.dns}" = mkMachine { - name = "playgroundsB"; - config = ./machines/playground.nix; - }; - - "${tfinfo.webghcA.dns}" = mkMachine { - name = "webghcA"; - config = ./machines/web-ghc.nix; - }; -} diff --git a/deployment/morph/machines/marlowe-dash.nix b/deployment/morph/machines/marlowe-dash.nix deleted file mode 100644 index 4c2a603236..0000000000 --- a/deployment/morph/machines/marlowe-dash.nix +++ /dev/null @@ -1,30 +0,0 @@ -{ pkgs, config, lib, tfinfo, ... }: -{ - imports = [ - ./std.nix - ../../../nix/modules/pab.nix - ]; - - networking = { - firewall.allowedTCPPorts = [ 22 80 9080 ]; - }; - - services.pab = { - enable = true; - pab-setup = pkgs.plutus-pab.pab-exes.plutus-pab-setup; - pab-executable = "${pkgs.marlowe-pab}/bin/marlowe-pab"; - staticContent = pkgs.marlowe-dashboard.client; - dbFile = "/var/lib/pab/pab-core.db"; - defaultWallet = 1; - webserverPort = 9080; - walletPort = 8081; - nodePort = 8082; - chainIndexPort = 8083; - signingProcessPort = 8084; - slotZeroTime = 1596059091000; # In milliseconds. 
See note [Datetime to slot] in Marlowe.Slot - slotLength = 1000; # In milliseconds - constantFee = 10; # Constant fee per transaction in lovelace - scriptsFeeFactor = 0.0; # Factor by which to multiply the size-dependent scripts fee in lovelace - }; - -} diff --git a/deployment/morph/machines/playground.nix b/deployment/morph/machines/playground.nix deleted file mode 100644 index 7d100eeb56..0000000000 --- a/deployment/morph/machines/playground.nix +++ /dev/null @@ -1,172 +0,0 @@ -{ pkgs, config, lib, tfinfo, ... }: -{ - - imports = [ - ./std.nix - ../../../nix/modules/plutus-playground.nix - ../../../nix/modules/marlowe-playground.nix - ]; - - networking = { - firewall.allowedTCPPorts = [ 22 80 8080 8181 9080 ]; - }; - - services.marlowe-playground = { - enable = true; - webghcURL = "http://${tfinfo.environment}.${tfinfo.plutusTld}"; - port = 4001; - frontendURL = - if tfinfo.environment == "production" - then "https://play.marlowe-finance.io" - else "https://${tfinfo.environment}.${tfinfo.marloweTld}"; - playground-server-package = pkgs.marlowe-playground.server; - }; - - services.plutus-playground = { - enable = true; - port = 4000; - webghcURL = "http://${tfinfo.environment}.${tfinfo.plutusTld}"; - frontendURL = "https://${tfinfo.environment}.${tfinfo.plutusTld}"; - playground-server-package = pkgs.plutus-playground.server; - }; - - services.nginx = - let - staticFileCacheControl = '' - # static files should not be too costly to serve so we can allow more generous rates - limit_req zone=staticlimit burst=1000; - add_header 'Cache-Control' 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0'; - expires off; - ''; - versionConfig = '' - default_type application/json; - return 200 '{"rev": "${tfinfo.rev}"}'; - ''; - in - { - enable = true; - recommendedGzipSettings = true; - recommendedProxySettings = true; - recommendedOptimisation = true; - - appendHttpConfig = '' - limit_req_zone $binary_remote_addr zone=plutuslimit:10m rate=2r/s; - limit_req_zone 
$binary_remote_addr zone=staticlimit:500m rate=100r/s; - server_names_hash_bucket_size 128; - log_format compression '$remote_addr - $remote_user [$time_local] ' - '"$request" $status $body_bytes_sent ' - '"$http_referer" "$http_user_agent" "$gzip_ratio"'; - ''; - - upstreams = { - plutus-playground.servers."127.0.0.1:4000" = { }; - marlowe-playground.servers."127.0.0.1:4001" = { }; - }; - virtualHosts = { - "marlowe-web" = { - listen = [{ addr = "0.0.0.0"; port = 8181; }]; - locations = { - "/" = { - root = "${pkgs.marlowe-web}"; - extraConfig = '' - ${staticFileCacheControl} - ''; - }; - }; - }; - "plutus-playground" = { - listen = [{ addr = "0.0.0.0"; port = 8080; }]; - locations = { - "/version" = { - extraConfig = versionConfig; - }; - "/health" = { - proxyPass = "http://plutus-playground"; - }; - "/" = { - root = "${pkgs.plutus-playground.client}"; - extraConfig = '' - ${staticFileCacheControl} - error_page 404 = @fallback; - ''; - }; - "^~ /doc/" = { - alias = "${pkgs.plutus-docs.site}/"; - extraConfig = '' - error_page 404 = @fallback; - ''; - }; - "@fallback" = { - proxyPass = "http://plutus-playground"; - proxyWebsockets = true; - extraConfig = '' - limit_req zone=plutuslimit burst=10; - ''; - }; - }; - }; - "marlowe-playground" = { - listen = [{ addr = "0.0.0.0"; port = 9080; }]; - locations = { - "/version" = { - extraConfig = versionConfig; - }; - "/health" = { - proxyPass = "http://marlowe-playground"; - }; - "/" = { - root = "${pkgs.marlowe-playground.client}"; - extraConfig = '' - ${staticFileCacheControl} - error_page 404 = @fallback; - ''; - }; - "^~ /doc/" = { - alias = "${pkgs.plutus-docs.site}/"; - extraConfig = '' - error_page 404 = @fallback; - ''; - }; - "@fallback" = { - proxyPass = "http://marlowe-playground"; - proxyWebsockets = true; - extraConfig = '' - limit_req zone=plutuslimit burst=10; - ''; - }; - }; - }; - }; - }; - - deployment = { - secrets = { - "plutus-secrets" = { - source = "./secrets.plutus.${tfinfo.environment}.env"; - 
destination = "/var/lib/playgrounds/plutus.env"; - action = [ "systemctl" "restart" "plutus-playground" ]; - permissions = "0444"; - }; - "marlowe-secrets" = { - source = "./secrets.marlowe.${tfinfo.environment}.env"; - destination = "/var/lib/playgrounds/marlowe.env"; - action = [ "systemctl" "restart" "marlowe-playground" ]; - permissions = "0444"; - }; - - }; - healthChecks = { - cmd = [ - { - cmd = [ "systemctl" "status" "plutus-playground.service" ]; - description = "Check if plutus-playground systemd service is running"; - } - { - cmd = [ "systemctl" "status" "marlowe-playground.service" ]; - description = "Check if marlowe-playground systemd service is running"; - } - ]; - }; - }; - -} diff --git a/deployment/morph/machines/std.nix b/deployment/morph/machines/std.nix deleted file mode 100644 index 7dc3629617..0000000000 --- a/deployment/morph/machines/std.nix +++ /dev/null @@ -1,43 +0,0 @@ -{ config, lib, pkgs, tfinfo, ... }: -{ - - ec2.hvm = true; - - nixpkgs.localSystem.system = "x86_64-linux"; - - nix = { - binaryCaches = [ https://hydra.iohk.io https://cache.nixos.org ]; - requireSignedBinaryCaches = false; - extraOptions = '' - auto-optimise-store = true - ''; - trustedBinaryCaches = [ https://hydra.iohk.io ]; - binaryCachePublicKeys = [ - "hydra.iohk.io:f/Ea+s+dFdN+3Y/G+FDgSq+a5NEWhJGzdjvKNGv0/EQ=" - "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" - ]; - gc.automatic = true; - gc.options = "--delete-older-than 7d"; - }; - - # - # Enable the firewall, ports will opened up per machine - # - networking = { - firewall.enable = true; - timeServers = [ "1.amazon.pool.ntp.org" "2.amazon.pool.ntp.org" "3.amazon.pool.ntp.org" ]; - }; - - # This makes our networking stack ignore the AWS MTU advertisement of 9001, - # that silently breaks intra-VPC, for some reason. - # The intent of this is to reduce the MTU to 1500. - # TODO: check if this is really needed. 
- networking.dhcpcd.extraConfig = '' - nooption interface_mtu - ''; - - # Allow `--substitute-on-destination` causing the target machine to fetch - # dependencies from the iohk binary cache instead of copying everything - # from the machine executing morph. - deployment.substituteOnDestination = true; -} diff --git a/deployment/morph/machines/web-ghc.nix b/deployment/morph/machines/web-ghc.nix deleted file mode 100644 index 78d41e367a..0000000000 --- a/deployment/morph/machines/web-ghc.nix +++ /dev/null @@ -1,34 +0,0 @@ -{ pkgs, config, lib, tfinfo, ... }: -{ - - imports = [ - ./std.nix - ../../../nix/modules/web-ghc.nix - ]; - - networking = { - firewall.allowedTCPPorts = [ 22 80 ]; - }; - - services = { - web-ghc = { - enable = true; - port = 80; - web-ghc-package = pkgs.web-ghc; - }; - }; - - deployment.healthChecks = { - cmd = [ - { - cmd = [ "systemctl" "status" "web-ghc.service" ]; - description = "Check if webghc systemd service is running"; - } - { - cmd = [ "curl" "http://localhost/health" ]; - description = "webghc /health endpoint is responding"; - } - ]; - }; - -} diff --git a/deployment/morph/mk-machine.nix b/deployment/morph/mk-machine.nix deleted file mode 100644 index 06996b51ab..0000000000 --- a/deployment/morph/mk-machine.nix +++ /dev/null @@ -1,40 +0,0 @@ -{ pkgs, plutus, tfinfo, extraImports ? [ ] }: -# mkMachine :: { config : Path, name : String } -> NixOS machine -# Takes a machine specific configuration and a hostname to set and -# applies generic settings: -# - aws machine settings from ./profiles/std.nix -# - configures root ssh keys for -# - adds plutus specific packages through an overlay -{ config, name }: { - imports = extraImports ++ [ - - (pkgs.path + "/nixos/modules/virtualisation/amazon-image.nix") - - config - - ({ config, ... }: { - config._module.args.tfinfo = tfinfo; - }) - - ({ lib, config, ... 
}: - { - networking.hostName = name; - users.extraUsers.root.openssh.authorizedKeys.keys = tfinfo.rootSshKeys; - nixpkgs = { - inherit pkgs; - overlays = [ - (self: super: { - marlowe-pab = plutus.marlowe-pab; - plutus-pab = plutus.plutus-pab; - marlowe-dashboard = plutus.marlowe-dashboard; - marlowe-playground = plutus.marlowe-playground; - marlowe-web = plutus.marlowe-web; - plutus-playground = plutus.plutus-playground; - web-ghc = plutus.web-ghc; - plutus-docs = plutus.docs; - }) - ]; - }; - }) - ]; -} diff --git a/deployment/morph/network.nix b/deployment/morph/network.nix deleted file mode 100644 index 2d71dcd5bf..0000000000 --- a/deployment/morph/network.nix +++ /dev/null @@ -1,11 +0,0 @@ -let - plutus = import ../../. { }; - pkgs = plutus.pkgs; - tfinfo = builtins.fromJSON (builtins.readFile ./machines.json); - mkMachine = pkgs.callPackage ./mk-machine.nix { - inherit plutus tfinfo; - }; -in -import ./machines.nix { - inherit pkgs mkMachine tfinfo; -} diff --git a/deployment/shell.nix b/deployment/shell.nix deleted file mode 100644 index 0c9fbc6f95..0000000000 --- a/deployment/shell.nix +++ /dev/null @@ -1,251 +0,0 @@ -{ pkgs ? (import ./.. { }).pkgs -, rev ? "dev" -}: -let - inherit (pkgs) writeShellScriptBin lib mkShell stdenv writeText; - inherit (pkgs) awscli morph jq; - terraform = pkgs.terraform_0_12; - inherit (pkgs.gitAndTools) hub; - - # All environments and the region they are in - envs = import ./envs.nix; - - # mkDeploymentShell : Provide a deployment shell for a specific environment - # The shell expects to be executed from within the `deployment` directory and will - # not work when invoked from elsewhere. - mkDeploymentShell = - { env # environment to work on - , region # region to deploy to - , rev ? 
"dev" # git revision being deployed - }: - let - # setupEnvSecrets : Set environment variables with secrets from pass - # - env: Environment to setup - setupEnvSecrets = env: '' - export DEPLOYMENT_ENV="${env}" - - SECRETS=$(${awscli}/bin/aws secretsmanager get-secret-value --secret env/${env} --query SecretString --output text --region ${region}) - export TF_VAR_marlowe_github_client_id=$(echo $SECRETS | ${jq}/bin/jq --raw-output .marlowe.githubClientId) - export TF_VAR_marlowe_github_client_secret=$(echo $SECRETS | ${jq}/bin/jq --raw-output .marlowe.githubClientSecret) - export TF_VAR_marlowe_jwt_signature=$(echo $SECRETS | ${jq}/bin/jq --raw-output .marlowe.jwtSignature) - export TF_VAR_plutus_github_client_id=$(echo $SECRETS | ${jq}/bin/jq --raw-output .plutus.githubClientId) - export TF_VAR_plutus_github_client_secret=$(echo $SECRETS | ${jq}/bin/jq --raw-output .plutus.githubClientSecret) - export TF_VAR_plutus_jwt_signature=$(echo $SECRETS | ${jq}/bin/jq --raw-output .plutus.jwtSignature) - - # In order to avoid problems with API rate-limiting when using `wait-github-status` - # we can specify an OAUTH application id and secret - export GITHUB_API_USER=$TF_VAR_plutus_github_client_id - export GITHUB_API_PW=$TF_VAR_plutus_github_client_secret - ''; - - # setupTerraform : Switch to `env` workspace (create it if neccessary) - # - env: environment to work on - # - region: region the environment is in - setupTerraform = env: region: '' - export TF_VAR_env="${env}" - export TF_VAR_aws_region="${region}" - export TF_VAR_output_path=$(pwd) - - ${terraform}/bin/terraform init ./terraform - if ! ${terraform}/bin/terraform workspace list ./terraform | grep -q ${env} ; then - ${terraform}/bin/terraform workspace new ${env} ./terraform - ${terraform}/bin/terraform workspace select ${env} ./terraform - else - ${terraform}/bin/terraform workspace select ${env} ./terraform - fi - ''; - - # provisionInfra: Apply a terraform configuration - # Provision the current environment. 
- provisionInfra = writeShellScriptBin "provision-infra" '' - set -eou pipefail - - echo "[provision-infra]: Provisioning infrastructure using terraform" - ${terraform}/bin/terraform apply -auto-approve ./terraform - ''; - - # destroyInfra: Destroy any existing infrastructure via terraform - destroyInfra = writeShellScriptBin "destroy-infra" '' - set -eou pipefail - - echo "[provision-infra]: Destroying infrastructure using terraform" - ${terraform}/bin/terraform destroy ./terraform - ''; - - # wait-github-status: wait until the current commit has been processed by hydra - # - checks the github status in a loop with 60s breaks until it is is "success" - # - # NOTE: this script depends on the GITHUB_API_USER and GITHUB_API_PW variables - # that are set above in `setupEnvSecrets` to avoid rate limiting problems. - waitGitHubStatus = writeShellScriptBin "wait-github-status" '' - set -eou pipefail - - if [ -z $GITHUB_API_USER ] || [ -z $GITHUB_API_PW ]; then - echo "[wait-github-status]: GITHUB_API_USER and GITHUB_API_PW must be set! Exiting." - exit 1 - fi - - echo "[wait-github-status]: waiting for commit to get processed by hydra" - GIT_COMMIT=$(git rev-parse HEAD) - GITHUB_API_URL=https://api.github.com/repos/input-output-hk/plutus/commits/"$GIT_COMMIT"/status - - fetchCommitStatus() { - # Request the status from the GitHub API and build an object: - # { , } - # Note: the "buildkite/plutus" state gets filtered out otherwise we - # will be stuck in an infinite loop waiting for our own success. 
- curl --silent\ - -u "$GITHUB_API_USER:$GITHUB_API_PW" \ - -H "Accept: application/vnd.github.v3+json" \ - "$GITHUB_API_URL" \ - | ${jq}/bin/jq -c '.statuses | map(select(.context != "buildkite/plutus")) | map ({(.context): (.state)}) | add' - } - - while true; do - GH_STATUS_MAP=$(fetchCommitStatus) - - # If any of the tests are in a failed state we can abort - if echo "$GH_STATUS_MAP" | ${jq}/bin/jq -c "values | .[]" | grep "failure\|error\|action_required\|cancelled\|timed_out" ; then - echo "[wait-github-status]: github reported a failure. Exiting" - exit 1 - fi - - # Check if all statuses have already been reported. If - # not we need to keep on waiting - hydra isn't ready. - ALL_CHECKS_PRESENT=$(echo "$GH_STATUS_MAP" | ${jq}/bin/jq 'has("ci/hydra-eval") and has("ci/hydra:Cardano:plutus:required") and has("ci/hydra-build:required")') - if ! [ "$ALL_CHECKS_PRESENT" = "true" ] ; then - echo "[wait-github-status]: waiting for all statuses to get reported ..." - sleep 60 - continue - fi - - # All relevant statuses have been reported and none of them are in a failed state. - # If all of them are "success" we are done. If not we have to keep on waiting. - # NOTE: A status is one of the failures captured above, "pending" or "success". - ALL_CHECKS_SUCCESS=$(echo "$GH_STATUS_MAP" | ${jq}/bin/jq '[.[]] | all(. == "success")') - if [ "$ALL_CHECKS_SUCCESS" = "true" ] ; then - echo "[wait-github-status]: all statuses have been reported as successful" - exit 0 - fi - done - ''; - - # deploy-nix: wrapper around executing `morph deploy` - # - Checks if `machines.json` is present - aborts if not - # - Checks if terraform is up to date - aborts if not - # - Writes ssh configuration and copies secrets to the morph config directory - deployNix = writeShellScriptBin "deploy-nix" '' - set -eou pipefail - - - # In order to ensure a consistent state we verify that terraform - # reports it has nothing to do before we even attempt to deploy - # any nix configuration. 
- - # The local files (ssh configuration and dns/ip information on ec2 - # instances) is part of the state so we have to create these before - # we check if the state is up to date - echo "[deploy-nix]: Creating terraform bridge files" - rm -rf ./plutus_playground.$DEPLOYMENT_ENV.conf - rm -rf ./machines.json - ${terraform}/bin/terraform apply -auto-approve -target=local_file.ssh_config -target=local_file.machines ./terraform/ - - echo "[deploy-nix]: Checking if terraform state is up to date" - if ! ${terraform}/bin/terraform plan --detailed-exitcode -compact-warnings ./terraform >/dev/null ; then - echo "[deploy-nix]: terraform state is not up to date - Aborting" - exit 1 - fi - - # morph needs info about the ec2 instances that were created by terraform. - # This bridge is provided by `machines.json` which is a local resource created - # by terraform in `deployment/terraform/machines.tf`. - - if ! [ -f ./machines.json ]; then - echo "[deploy-nix]: machines.json is not present. Aborting." - exit 1 - fi - - echo "[deploy-nix]: copying machines.json .." - cat ./machines.json | jq --arg rev ${rev} '. + {rev: $rev}' > ./morph/machines.json - - if [ -z "$DEPLOYMENT_ENV" ]; then - echo "[deploy-nix]: Error, 'DEPLOYMENT_ENV' is not set! Aborting." - exit 1 - fi - - # Create secrets files which are uploaded using morph secrets - # feature. - - echo "[deploy-nix]: Writing plutus secrets ..." - plutus_tld=$(cat ./machines.json | ${jq}/bin/jq -r '.plutusTld') - cat > ./morph/secrets.plutus.$DEPLOYMENT_ENV.env < ./morph/secrets.marlowe.$DEPLOYMENT_ENV.env </dev/null ; then - echo "Error: Not logged in to aws. 
Aborting" - echo "Use 'eval \$(aws-mfa-login )' to log in" - exit 1 - fi - - ${setupEnvSecrets env} - ${setupTerraform env region} - - echo "---------------------------------------------------------------------" - echo "deployment shell for '${env}'" - echo "---------------------------------------------------------------------" - echo "Available commands:" - echo "" - echo -e "\t* provision-infra: provision infrastructure" - echo -e "\t* destroy-infra: destroy the infrastructure completely" - echo -e "\t* deploy-nix: deploy nix configuration to infrastructure" - echo -e "\t* deploy: provision infrastructure and deploy nix configuration" - echo -e "" - echo "Notes:" - echo "" - echo "- Being logged in to aws via 'aws-mfa-login' is a prerequisite to all infrastructure commands" - echo "- The './terraform' dir has to be specified to run arbitrary terraform commands (e.g. 'terraform plan ./terraform')" - echo "- The './morph/configurations.nix' file has to be specified to run arbitrary morph commands (e.g. 
'morph build ./morph/configurations.nix) " - '' + lib.optionalString (stdenv.isDarwin) ''echo "- Deploying on macOS requires a remote builder to work"''; - }; -in -# provide a shell for each entry in `env` (use `nix-shell -A env` to enter) -builtins.mapAttrs - (env: cfg: mkDeploymentShell { - region = cfg.region; - inherit env rev; - }) - envs diff --git a/deployment/terraform/certificates.tf b/deployment/terraform/certificates.tf deleted file mode 100644 index b7986987c4..0000000000 --- a/deployment/terraform/certificates.tf +++ /dev/null @@ -1,144 +0,0 @@ -# Plutus Playground SSL Certificate -resource "aws_acm_certificate" "plutus_private" { - domain_name = "*.${var.plutus_tld}" - validation_method = "DNS" -} - -resource "aws_route53_record" "plutus_private" { - for_each = { - for dvo in aws_acm_certificate.plutus_private.domain_validation_options : dvo.domain_name => { - name = dvo.resource_record_name - record = dvo.resource_record_value - type = dvo.resource_record_type - } - } - - allow_overwrite = true - name = each.value.name - records = [each.value.record] - ttl = 60 - type = each.value.type - zone_id = var.plutus_public_zone -} - -resource "aws_acm_certificate_validation" "plutus_private" { - certificate_arn = aws_acm_certificate.plutus_private.arn - validation_record_fqdns = [for record in aws_route53_record.plutus_private : record.fqdn] -} - - -# Marlowe Playground SSL Certificate -resource "aws_acm_certificate" "marlowe_private" { - domain_name = "*.${var.marlowe_tld}" - validation_method = "DNS" -} - -resource "aws_route53_record" "marlowe_private" { - for_each = { - for dvo in aws_acm_certificate.marlowe_private.domain_validation_options : dvo.domain_name => { - name = dvo.resource_record_name - record = dvo.resource_record_value - type = dvo.resource_record_type - } - } - - allow_overwrite = true - name = each.value.name - records = [each.value.record] - ttl = 60 - type = each.value.type - zone_id = var.marlowe_public_zone -} - -resource 
"aws_acm_certificate_validation" "marlowe_private" { - certificate_arn = aws_acm_certificate.marlowe_private.arn - validation_record_fqdns = [for record in aws_route53_record.marlowe_private : record.fqdn] -} - -# Marlowe Dash SSL Certificate -resource "aws_acm_certificate" "marlowe_dash_private" { - domain_name = "*.${var.marlowe_dash_tld}" - validation_method = "DNS" -} - -resource "aws_route53_record" "marlowe_dash_private" { - for_each = { - for dvo in aws_acm_certificate.marlowe_dash_private.domain_validation_options : dvo.domain_name => { - name = dvo.resource_record_name - record = dvo.resource_record_value - type = dvo.resource_record_type - } - } - - allow_overwrite = true - name = each.value.name - records = [each.value.record] - ttl = 60 - type = each.value.type - zone_id = var.marlowe_dash_public_zone -} - -resource "aws_acm_certificate_validation" "marlowe_dash_private" { - certificate_arn = aws_acm_certificate.marlowe_dash_private.arn - validation_record_fqdns = [for record in aws_route53_record.marlowe_dash_private : record.fqdn] -} - -# Marlowe Web SSL Certificate -resource "aws_acm_certificate" "marlowe_web_private" { - domain_name = "*.${var.marlowe_web_tld}" - validation_method = "DNS" -} - -resource "aws_route53_record" "marlowe_web_private" { - for_each = { - for dvo in aws_acm_certificate.marlowe_web_private.domain_validation_options : dvo.domain_name => { - name = dvo.resource_record_name - record = dvo.resource_record_value - type = dvo.resource_record_type - } - } - - allow_overwrite = true - name = each.value.name - records = [each.value.record] - ttl = 60 - type = each.value.type - zone_id = var.marlowe_web_public_zone -} - -resource "aws_acm_certificate_validation" "marlowe_web_private" { - certificate_arn = aws_acm_certificate.marlowe_web_private.arn - validation_record_fqdns = [for record in aws_route53_record.marlowe_web_private : record.fqdn] -} - -# -# marlowe-finance.io certificates -# - -resource "aws_acm_certificate" 
"marlowe_finance_io" { - domain_name = "marlowe-finance.io" - validation_method = "DNS" - subject_alternative_names = ["*.marlowe-finance.io"] -} - -resource "aws_route53_record" "marlowe_finance_io" { - for_each = { - for dvo in aws_acm_certificate.marlowe_finance_io.domain_validation_options : dvo.domain_name => { - name = dvo.resource_record_name - record = dvo.resource_record_value - type = dvo.resource_record_type - } - } - - allow_overwrite = true - name = each.value.name - records = [each.value.record] - ttl = 60 - type = each.value.type - zone_id = var.marlowe_finance_io_public_zone -} - -resource "aws_acm_certificate_validation" "marlowe_finance_io" { - certificate_arn = aws_acm_certificate.marlowe_finance_io.arn - validation_record_fqdns = [for record in aws_route53_record.marlowe_finance_io : record.fqdn] -} diff --git a/deployment/terraform/loadbalancing.tf b/deployment/terraform/loadbalancing.tf deleted file mode 100644 index b0e40719c8..0000000000 --- a/deployment/terraform/loadbalancing.tf +++ /dev/null @@ -1,407 +0,0 @@ -# Public ALB - -# Security Group -resource "aws_security_group" "public_alb" { - vpc_id = aws_vpc.plutus.id - - ## inbound (world): ICMP 3:4 "Fragmentation Needed and Don't Fragment was Set" - ingress { - from_port = "3" - to_port = "4" - protocol = "ICMP" - cidr_blocks = ["0.0.0.0/0"] - } - - ## inbound (world): https - ingress { - from_port = "443" - to_port = "443" - protocol = "TCP" - cidr_blocks = ["0.0.0.0/0"] - } - - ## inbound (world): http - ingress { - from_port = "80" - to_port = "80" - protocol = "TCP" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 80 - to_port = 80 - protocol = "TCP" - cidr_blocks = var.private_subnet_cidrs - } - - egress { - # both PAB and plutus playground use the same port - from_port = local.plutus_playground_port - to_port = local.plutus_playground_port - protocol = "TCP" - cidr_blocks = var.private_subnet_cidrs - } - - egress { - from_port = local.marlowe_playground_port - to_port = 
local.marlowe_playground_port - protocol = "TCP" - cidr_blocks = var.private_subnet_cidrs - } - - egress { - from_port = local.marlowe_web_port - to_port = local.marlowe_web_port - protocol = "TCP" - cidr_blocks = var.private_subnet_cidrs - } - - tags = { - Name = "${local.project}_${var.env}_public_alb" - Project = local.project - Environment = var.env - } -} - -resource "aws_alb" "plutus" { - subnets = aws_subnet.public.*.id - security_groups = [aws_security_group.public_alb.id] - internal = false - - tags = { - Name = "${local.project}_${var.env}_public_alb" - Project = local.project - Environment = var.env - } -} - -resource "aws_lb_listener" "redirect" { - load_balancer_arn = aws_alb.plutus.arn - port = "80" - protocol = "HTTP" - - default_action { - type = "redirect" - - redirect { - port = "443" - protocol = "HTTPS" - status_code = "HTTP_301" - } - } -} - -resource "aws_alb_listener" "playground" { - load_balancer_arn = aws_alb.plutus.arn - port = "443" - protocol = "HTTPS" - certificate_arn = aws_acm_certificate.plutus_private.arn - - default_action { - target_group_arn = aws_alb_target_group.webghc.arn - type = "forward" - } -} - -resource "aws_lb_listener_certificate" "marlowe_finance_io" { - listener_arn = aws_alb_listener.playground.arn - certificate_arn = aws_acm_certificate.marlowe_finance_io.arn -} - -resource "aws_lb_listener_certificate" "marlowe_web" { - listener_arn = aws_alb_listener.playground.arn - certificate_arn = aws_acm_certificate.marlowe_web_private.arn -} - -resource "aws_lb_listener_certificate" "marlowe" { - listener_arn = aws_alb_listener.playground.arn - certificate_arn = aws_acm_certificate.marlowe_private.arn -} - -resource "aws_lb_listener_certificate" "marlowe_dash" { - listener_arn = aws_alb_listener.playground.arn - certificate_arn = aws_acm_certificate.marlowe_dash_private.arn -} - -resource "aws_alb_listener_rule" "marlowe-web" { - listener_arn = aws_alb_listener.playground.arn - action { - type = "forward" - 
target_group_arn = aws_alb_target_group.marlowe_web.id - } - - condition { - host_header { - values = [local.marlowe_web_domain_name] - } - } -} - -resource "aws_alb_target_group" "marlowe_web" { - port = "80" - protocol = "HTTP" - vpc_id = aws_vpc.plutus.id -} - -resource "aws_alb_target_group_attachment" "marlowe_web" { - target_group_arn = aws_alb_target_group.marlowe_web.arn - target_id = aws_instance.playgrounds_a.id - port = local.marlowe_web_port -} - -resource "aws_route53_record" "marlowe_web_alb" { - zone_id = var.marlowe_web_public_zone - name = local.marlowe_web_domain_name - type = "A" - - alias { - name = aws_alb.plutus.dns_name - zone_id = aws_alb.plutus.zone_id - evaluate_target_health = true - } -} - -## ALB rule for web-ghc -resource "aws_alb_target_group" "webghc" { - # ALB is taking care of SSL termination so we listen to port 80 here - port = "80" - protocol = "HTTP" - vpc_id = aws_vpc.plutus.id - - health_check { - path = "/health" - } - - stickiness { - type = "lb_cookie" - } -} - -resource "aws_alb_listener_rule" "runghc" { - depends_on = [aws_alb_target_group.webghc] - listener_arn = aws_alb_listener.playground.arn - priority = 100 - - action { - type = "forward" - target_group_arn = aws_alb_target_group.webghc.id - } - - condition { - path_pattern { - values = ["/runghc"] - } - } -} - -resource "aws_alb_target_group_attachment" "webghc_a" { - target_group_arn = aws_alb_target_group.webghc.arn - target_id = aws_instance.webghc_a.id - port = "80" -} - -## ALB rule for marlowe-dashboard -resource "aws_alb_target_group" "marlowe_dash" { - # ALB is taking care of SSL termination so we listen to port 80 here - port = "80" - protocol = "HTTP" - vpc_id = aws_vpc.plutus.id - - health_check { - path = "/" - } - - stickiness { - type = "lb_cookie" - } -} - -resource "aws_alb_listener_rule" "marlowe_dash" { - depends_on = [aws_alb_target_group.marlowe_dash] - listener_arn = aws_alb_listener.playground.arn - priority = 114 - - action { - type = 
"forward" - target_group_arn = aws_alb_target_group.marlowe_dash.id - } - - condition { - host_header { - values = [local.marlowe_dash_domain_name] - } - } -} - -resource "aws_alb_target_group_attachment" "marlowe_dash_a" { - target_group_arn = aws_alb_target_group.marlowe_dash.arn - target_id = aws_instance.marlowe_dash_a.id - port = local.pab_port -} - -resource "aws_route53_record" "marlowe_dash_alb" { - zone_id = var.marlowe_dash_public_zone - name = local.marlowe_dash_domain_name - type = "A" - - alias { - name = aws_alb.plutus.dns_name - zone_id = aws_alb.plutus.zone_id - evaluate_target_health = true - } -} - -## ALB rule for marlowe-playground -resource "aws_alb_target_group" "marlowe_playground" { - # ALB is taking care of SSL termination so we listen to port 80 here - port = "80" - protocol = "HTTP" - vpc_id = aws_vpc.plutus.id - - health_check { - path = "/version" - } - - stickiness { - type = "lb_cookie" - } -} - -resource "aws_alb_listener_rule" "marlowe_playground" { - depends_on = [aws_alb_target_group.marlowe_playground] - listener_arn = aws_alb_listener.playground.arn - priority = 115 - - action { - type = "forward" - target_group_arn = aws_alb_target_group.marlowe_playground.id - } - - condition { - host_header { - values = [local.marlowe_domain_name] - } - } -} - -resource "aws_alb_target_group_attachment" "marlowe_playground_a" { - target_group_arn = aws_alb_target_group.marlowe_playground.arn - target_id = aws_instance.playgrounds_a.id - port = local.marlowe_playground_port -} - -resource "aws_route53_record" "marlowe_playground_alb" { - zone_id = var.marlowe_public_zone - name = local.marlowe_domain_name - type = "A" - - alias { - name = aws_alb.plutus.dns_name - zone_id = aws_alb.plutus.zone_id - evaluate_target_health = true - } -} - -## ALB rule for plutus-playground -resource "aws_alb_target_group" "plutus_playground" { - # ALB is taking care of SSL termination so we listen to port 80 here - port = "80" - protocol = "HTTP" - vpc_id = 
aws_vpc.plutus.id - - health_check { - path = "/version" - } - - stickiness { - type = "lb_cookie" - } -} - -resource "aws_alb_listener_rule" "plutus_playground" { - depends_on = [aws_alb_target_group.plutus_playground] - listener_arn = aws_alb_listener.playground.arn - priority = 116 - - action { - type = "forward" - target_group_arn = aws_alb_target_group.plutus_playground.id - } - - condition { - host_header { - values = [local.plutus_domain_name] - } - } -} - -resource "aws_alb_target_group_attachment" "plutus_playground_a" { - target_group_arn = aws_alb_target_group.plutus_playground.arn - target_id = aws_instance.playgrounds_a.id - port = local.plutus_playground_port -} - -resource "aws_route53_record" "plutus_playground_alb" { - zone_id = var.plutus_public_zone - name = local.plutus_domain_name - type = "A" - - alias { - name = aws_alb.plutus.dns_name - zone_id = aws_alb.plutus.zone_id - evaluate_target_health = true - } -} - - -# -# Production: marlowe-finance.io forwarding -# - -resource "aws_alb_listener_rule" "marlowe-finance-marlowe-web" { - listener_arn = aws_alb_listener.playground.arn - action { - type = "forward" - target_group_arn = aws_alb_target_group.marlowe_web.id - } - - condition { - host_header { - values = ["marlowe-finance.io"] - } - } -} - -resource "aws_alb_listener_rule" "marlowe-finance-marlowe-dash" { - depends_on = [aws_alb_target_group.marlowe_dash] - listener_arn = aws_alb_listener.playground.arn - - action { - type = "forward" - target_group_arn = aws_alb_target_group.marlowe_dash.id - } - - condition { - host_header { - values = ["run.marlowe-finance.io"] - } - } -} - -resource "aws_alb_listener_rule" "marlowe-finance-marlowe-playground" { - depends_on = [aws_alb_target_group.marlowe_playground] - listener_arn = aws_alb_listener.playground.arn - - action { - type = "forward" - target_group_arn = aws_alb_target_group.marlowe_playground.id - } - - condition { - host_header { - values = ["play.marlowe-finance.io"] - } - } -} - diff 
--git a/deployment/terraform/locals.tf b/deployment/terraform/locals.tf deleted file mode 100644 index 8f55f28d3c..0000000000 --- a/deployment/terraform/locals.tf +++ /dev/null @@ -1,58 +0,0 @@ -# This file contains all values that do not need to be set at runtime. For example `env` must be a variable because you need to set the environment -# when you run terraform apply however despite `marlowe_domain_name` depending on the value of `env`, it does not need to be set when you run -# terraform apply as it is an expression that is evaluated based on `env` etc. -locals { - project = "plutus_playground" - - # By default domain names are structured by environment and type e.g. env.plutus.iohkdev.io but we can override those e.g. prodplutus.iohk.io - marlowe_domain_name = "${var.marlowe_full_domain != "" ? var.marlowe_full_domain : "${var.env}.${var.marlowe_tld}"}" - plutus_domain_name = "${var.plutus_full_domain != "" ? var.plutus_full_domain : "${var.env}.${var.plutus_tld}"}" - marlowe_dash_domain_name = "${var.env}.${var.marlowe_dash_tld}" - marlowe_web_domain_name = "${var.env}.${var.marlowe_web_tld}" - - marlowe_web_port = 8181 - plutus_playground_port = 8080 - marlowe_playground_port = 9080 - pab_port = 9080 - - # SSH Keys - ssh_keys = { - ci-deployer = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPlPr/5Pbz8yf1j+1G6tOKacQSsX4A9w4SM7MvXij21V deployer@ci" - pablo = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQCeNj/ZQL+nynseTe42O4G5rs4WqyJKEOMcuiVBki2XT/UuoLz40Lw4b54HtwFTaUQQa3zmSJN5u/5KC8TW8nIKF/7fYChqypX3KKBSqBJe0Gul9ncTqHmzpzrwERlh5GkYSH+nr5t8cUK1pBilscKbCxh5x6irOnUmosoKJDv68WKq8WLsjpRslV5/1VztBanFFOZdD3tfIph1Yn7j1DQP4NcT1cQGoBhO0b0vwHtz6vTY4SpHnYuwB1K4dQ3k+gYJUspn03byi/8KVvcerLKfXYFKR5uvRkHihlIwjlxL2FoXIkGhtlkFVFOx76CvEv8LU5AT1ueJ34C/qP6PSD//pezXkk3e4UGeQMLOUu507FjfjHjD4luxIInzBb1KLAjzxb+2B4JTHy2uUu1dpHXarqSyR3DAPcLqUjZajZ+6mQh7zNRgkwXyZqg9p2TOdfiH9dvrqPowocGJgfjsYnd9rfdQVc10h1zk4pP4pP/YhgMVzYYc/ytCqUP41zSsrtJI592PUS9/quDGfrUcuG4t06DJgevky5AGX2og+sR4e83UpgId/DdV/m1OIvuoS4iMrzN2XmZ7IaFxH03nWQPrndDJ3j9ZHiaZ9IyW0XwthJFXcaslL5w3c0+1y8blxhC0vHT4NUsf5vcY3pFrBsMbTt1yNIGcitnLhXC1k99JbQ==" - hernan = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDR3qtsMDFjfMFBn+Xgic3cFLv5+wnKPTFV8ps3tlLnmJLPSVbhhXRYsn0ZDZtSbfSFyGWIEDLIBDp61DjkrO/qObv0hu9BOT54YSEUel89fTWHX2dEqUd0zEU9YvwHTVfIeuNOg3T7pcwtFSDCND/CE1o1rpYWWXshF10qrBVUuWJJxpJJF6LVVHD6xn6Yf6qR5PJ1WKJyR/+LL18FZuS4j0V0PJP1Kv1hHmlWM5v8N6IX+HQY/SdoB0e9xrOMbwFRTBxjpt2qeRVB7nskHnXEEBCm16aXi41XqdV+II1rkdY9oFPzjdNBTz7QHrf+1TIGiBIlhdC8tkbBtUPDZB/ywRtthM3o46dddxaVJnp1lqeVCDVckej4IYnRJTWYaFoG13peaIh+SXLGfLrdlWnjfzHx/4VmDfhpgi5Jmmfoel8S1n3cn4woEmbCK2aKWP1p8FCpY4QFICT5aJY3nkk0ciglbC58Q4sm3Pm3Hr3Stfe0RxZhQwosLAWX6kqr+EU= hrajchert@MacBook-Pro-de-Hernan.local" - bozhidar = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCjoAB+Z1YyhKMV8tRqSTfkt4BYcYU2Y97WrVGOALOU6m5AGA/fHIq23ELalovG1Im1UWCDA/uMd7Avl9nUB2CxMhm33K2whUA62A6iUp6HdlxQg4C5c2uhxiJzhwLT8dUj5ACmxCGDVuy5o/2fQXyPXii/IjJnJv0Os39u1jipqRTeWfittZBVeIlu6e23H8HHuUmMvHyDPZZ6z1lER7ZaJh/fYN357mw5oJq7jee1SRsgu056v1550lhjWcKvKvaC4osvGBoxRDuPmlFaC/TBBld+kEaSV8GX+FsqCDTaezY+EpcDfLwpp+OsRvth48/8Bxx73e8izUdd/regbzUb boko@boko" - dimitar = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQC/GZjyhqoOMCbCEANAqpXOzVZsKnnAXkaZQICSSibk2AZxokgplHi9CpAX63M5fRhxy8YfA5v7iOUTYt8OYQEYm1EFlPWkf9CtUWIKp89uT5618SC6vbrFDY5qHXrgZRPSoyhO0/XNQSiGB34JwBQ5rvD1SAXSnoCNT6SvbgNuJfcCRVrIPdn60qmwNfyJmrHDyqbyENhDlYBdrBgncpki0SW51pJ0Q4OwC+686Mjo0I3IJcw9BHIrNoCxc84vR6o4IhjdSOs8lDej5iBccYQ833jI/EAnbhVbTKphPUzbnAeQnPcKV9DH/uv6J0c2jKcMXsSTSGsb2cLLt4xUy9I5 dimitar@dimitar-HP-ProBook-450-G4" - shlevy = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID/fJqgjwPG7b5SRPtCovFmtjmAksUSNg3xHWyqBM4Cs shlevy@shlevy-laptop" - } - - # Anyone who wants ssh access to a machine needs ssh access to the bastion hosts - bastion_ssh_keys_ks = { - alpha = ["pablo", "shlevy", "ci-deployer"] - pablo = ["pablo"] - production = ["pablo", "shlevy", "ci-deployer"] - playground = ["pablo", "shlevy"] - testing = ["pablo", "shlevy", "bozhidar", "dimitar"] - hernan = ["hernan"] - staging = ["pablo", "shlevy", "ci-deployer"] - bitte_match = ["shlevy", "ci-deployer"] - } - bastion_ssh_keys = [for k in local.bastion_ssh_keys_ks[var.env] : local.ssh_keys[k]] - - # root users are able to deploy to the machines using morph - root_ssh_keys_ks = { - alpha = ["pablo", "shlevy", "ci-deployer"] - pablo = ["pablo"] - production = ["pablo", "shlevy", "ci-deployer"] - testing = ["pablo", "shlevy", "bozhidar", "dimitar"] - hernan = ["hernan"] - staging = ["pablo", "shlevy", "ci-deployer"] - bitte_match = ["shlevy", "ci-deployer"] - } - root_ssh_keys = [for k in local.root_ssh_keys_ks[var.env] : local.ssh_keys[k]] - -} - -module "nixos_image" { - source = "git::https://github.com/tweag/terraform-nixos.git//aws_image_nixos?ref=5f5a0408b299874d6a29d1271e9bffeee4c9ca71" - release = "20.09" -} diff --git a/deployment/terraform/machines.tf b/deployment/terraform/machines.tf deleted file mode 100644 index beeda3e557..0000000000 --- a/deployment/terraform/machines.tf +++ /dev/null @@ -1,38 +0,0 @@ -# This produces a json file with the names and addresses of all the EC2 instances that can then be used by morph 
-locals { - webghcA = { - name = "webghcA" - ip = "${element(concat(aws_instance.webghc_a.*.private_ip, list("")), 0)}" - dns = "webghc-a.${element(concat(aws_route53_zone.plutus_private_zone.*.name, list("")), 0)}" - } - - marloweDashA = { - name = "marloweDashA" - ip = "${element(concat(aws_instance.marlowe_dash_a.*.private_ip, list("")), 0)}" - dns = "marlowe-dash-a.${element(concat(aws_route53_zone.plutus_private_zone.*.name, list("")), 0)}" - } - - playgroundsA = { - name = "playgroundsA" - ip = "${element(concat(aws_instance.playgrounds_a.*.private_ip, list("")), 0)}" - dns = "playgrounds-a.${element(concat(aws_route53_zone.plutus_private_zone.*.name, list("")), 0)}" - } - - machines = { - webghcA = "${local.webghcA}" - marloweDashA = "${local.marloweDashA}" - playgroundsA = "${local.playgroundsA}" - rootSshKeys = local.root_ssh_keys - awsRegion = "${var.aws_region}" - environment = "${var.env}" - project = "${local.project}" - tld = "${var.plutus_tld}" - plutusTld = "${var.plutus_tld}" - marloweTld = "${var.marlowe_tld}" - } -} - -resource "local_file" "machines" { - content = jsonencode(local.machines) - filename = "${pathexpand(var.output_path)}/machines.json" -} diff --git a/deployment/terraform/main.tf b/deployment/terraform/main.tf deleted file mode 100644 index d1d8635286..0000000000 --- a/deployment/terraform/main.tf +++ /dev/null @@ -1,16 +0,0 @@ -terraform { - required_version = "~> 0.12.20" - - backend "s3" { - bucket = "plutus-playground-tf" - key = "state" - region = "eu-west-1" - profile = "plutus-playground" - } -} - -provider "aws" { - region = var.aws_region - version = "3.24.1" - profile = "plutus-playground" -} diff --git a/deployment/terraform/marlowe-dashboard.tf b/deployment/terraform/marlowe-dashboard.tf deleted file mode 100644 index 2457d63dc2..0000000000 --- a/deployment/terraform/marlowe-dashboard.tf +++ /dev/null @@ -1,73 +0,0 @@ -# Security Group -resource "aws_security_group" "marlowe_dash" { - vpc_id = aws_vpc.plutus.id - name = 
"${local.project}_${var.env}_marlowe_dash" - - ingress { - from_port = 22 - to_port = 22 - protocol = "TCP" - cidr_blocks = concat(var.public_subnet_cidrs, var.private_subnet_cidrs) - } - - ## inbound (world): http - - ingress { - from_port = local.pab_port - to_port = local.pab_port - protocol = "TCP" - cidr_blocks = concat(var.public_subnet_cidrs, var.private_subnet_cidrs) - } - - ## outgoing: all - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags = { - Name = "${local.project}_${var.env}_marlowe_dash" - Project = local.project - Environment = var.env - } -} - -data "template_file" "marlowe_dash_user_data" { - template = file("${path.module}/templates/default_configuration.nix") - - vars = { - root_ssh_keys = join(" ", formatlist("\"%s\"", local.root_ssh_keys)) - } -} - -resource "aws_instance" "marlowe_dash_a" { - ami = module.nixos_image.ami - - instance_type = var.marlowe_dash_instance_type - subnet_id = aws_subnet.private.*.id[0] - user_data = data.template_file.marlowe_dash_user_data.rendered - - vpc_security_group_ids = [ - aws_security_group.marlowe_dash.id, - ] - - root_block_device { - volume_size = "20" - } - - tags = { - Name = "${local.project}_${var.env}_marlowe_dash_a" - Project = local.project - Environment = var.env - } -} - -resource "aws_route53_record" "marlowe_dash_internal_a" { - zone_id = aws_route53_zone.plutus_private_zone.zone_id - type = "A" - name = "marlowe-dash-a.${aws_route53_zone.plutus_private_zone.name}" - ttl = 300 - records = [aws_instance.marlowe_dash_a.private_ip] -} diff --git a/deployment/terraform/network.tf b/deployment/terraform/network.tf deleted file mode 100644 index 1cfe326a8f..0000000000 --- a/deployment/terraform/network.tf +++ /dev/null @@ -1,274 +0,0 @@ -# This file sets up a basic network with private and public subnets and bastion machines to enable ssh access to the private subnets - -# VPC -resource "aws_vpc" "plutus" { - cidr_block = var.vpc_cidr - 
enable_dns_hostnames = true - - tags = { - Name = "${local.project}_${var.env}" - Project = local.project - Environment = var.env - } -} - -# Public Subnets -resource "aws_subnet" "public" { - vpc_id = aws_vpc.plutus.id - availability_zone = "${var.aws_region}${var.azs[count.index]}" - cidr_block = var.public_subnet_cidrs[count.index] - count = length(var.azs) - - map_public_ip_on_launch = true - - tags = { - Name = "${local.project}_${var.env}_public_${var.azs[count.index]}" - Project = local.project - Environment = var.env - } -} - -# Internet Gateway -resource "aws_internet_gateway" "plutus" { - vpc_id = aws_vpc.plutus.id - - tags = { - Name = "${local.project}_${var.env}" - Project = local.project - Environment = var.env - } -} - -# Public Route -resource "aws_route" "public" { - route_table_id = aws_vpc.plutus.main_route_table_id - destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.plutus.id -} - -# Elastic IPs -resource "aws_eip" "nat" { - vpc = true - depends_on = [aws_internet_gateway.plutus] - count = length(var.azs) - - tags = { - Name = "${local.project}_${var.env}_${var.azs[count.index]}" - Project = local.project - Environment = var.env - } -} - -# NATs -resource "aws_nat_gateway" "plutus" { - count = length(var.azs) - allocation_id = aws_eip.nat.*.id[count.index] - subnet_id = aws_subnet.public.*.id[count.index] - depends_on = [aws_internet_gateway.plutus] - - tags = { - Name = "${local.project}_${var.env}_${var.azs[count.index]}" - Project = local.project - Environment = var.env - } -} - -# Associate public subnets to public route tables -resource "aws_route_table_association" "public" { - count = length(var.azs) - subnet_id = aws_subnet.public.*.id[count.index] - route_table_id = aws_vpc.plutus.main_route_table_id -} - -# Private Subnets -resource "aws_subnet" "private" { - count = length(var.azs) - vpc_id = aws_vpc.plutus.id - availability_zone = "${var.aws_region}${var.azs[count.index]}" - cidr_block = 
var.private_subnet_cidrs[count.index] - - tags = { - Name = "${local.project}_${var.env}_private_${var.azs[count.index]}" - Project = local.project - Environment = var.env - } -} - -# Private Route Tables -resource "aws_route_table" "private" { - count = length(var.azs) - vpc_id = aws_vpc.plutus.id - - tags = { - Name = "${local.project}_${var.env}_private_${var.azs[count.index]}" - Project = local.project - Environment = var.env - } -} - -# Private Routes -resource "aws_route" "private" { - count = length(var.azs) - route_table_id = aws_route_table.private.*.id[count.index] - destination_cidr_block = "0.0.0.0/0" - nat_gateway_id = aws_nat_gateway.plutus.*.id[count.index] -} - -# Associate private subnets to private route tables -resource "aws_route_table_association" "private" { - count = length(var.azs) - subnet_id = aws_subnet.private.*.id[count.index] - route_table_id = aws_route_table.private.*.id[count.index] -} - - -resource "aws_instance" "bastion" { - count = length(var.azs) - ami = module.nixos_image.ami - instance_type = var.bastion_instance_type - associate_public_ip_address = true - user_data = data.template_file.bastion_user_data.rendered - source_dest_check = false - - vpc_security_group_ids = [ - "${aws_security_group.bastion.id}", - ] - - subnet_id = aws_subnet.public.*.id[count.index] - - root_block_device { - volume_size = 20 - } - - tags = { - Name = "${local.project}_${var.env}_bastion_${var.azs[count.index]}" - Project = local.project - Environment = var.env - } -} - -resource "aws_security_group" "bastion" { - vpc_id = aws_vpc.plutus.id - - # inbound (world): ICMP 3:4 "Fragmentation Needed and Don't Fragment was Set" - ingress { - from_port = "3" - to_port = "4" - protocol = "ICMP" - cidr_blocks = ["0.0.0.0/0"] - } - - ## inbound ssh - # We want to lock this down to the zerotier network in the future - ingress { - protocol = "TCP" - from_port = 22 - to_port = 22 - cidr_blocks = ["0.0.0.0/0"] - } - - ## FIXME: We are not using zerotier now, I 
think we can remove this - ## zerotier must use some custom protocol, TCP + UDP doesn't work - # Currently asking zerotier if I can lock this down further - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 22 - to_port = 22 - protocol = "TCP" - cidr_blocks = var.private_subnet_cidrs - } - - # Allow internet access to install things, we could maybe lock this down to nixpkgs somehow - # These are currently a bit useless since we are letting all traffic out due to zerotier - # but hopefully in the future we can lock things down further - egress { - from_port = 80 - to_port = 80 - protocol = "TCP" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 443 - to_port = 443 - protocol = "TCP" - cidr_blocks = ["0.0.0.0/0"] - } - - tags = { - Name = "${local.project}_${var.env}_bastion" - Project = local.project - Environment = var.env - } -} - -resource "aws_route53_zone" "plutus_private_zone" { - vpc { - vpc_id = aws_vpc.plutus.id - } - name = "internal.${var.env}.${var.plutus_tld}" - - tags = { - Name = "${local.project}_${var.env}" - Project = local.project - Environment = var.env - } -} - -resource "aws_route53_zone" "marlowe_finance_io_zone" { - count = (var.env == "production" ? 1 : 0) - name = "marlowe-finance.io" -} - -resource "aws_route53_record" "marlowe_finance_top_level" { - count = (var.env == "production" ? 1 : 0) - zone_id = aws_route53_zone.marlowe_finance_io_zone[0].zone_id - name = "marlowe-finance.io" - type = "A" - ttl = 300 - records = [var.marlowe_finance_production_ip] -} - -resource "aws_route53_record" "marlowe_finance_play" { - count = (var.env == "production" ? 1 : 0) - zone_id = aws_route53_zone.marlowe_finance_io_zone[0].zone_id - name = "play.marlowe-finance.io" - type = "CNAME" - ttl = 300 - records = ["production.marlowe.iohkdev.io"] -} - -resource "aws_route53_record" "marlowe_finance_run" { - count = (var.env == "production" ? 
1 : 0) - zone_id = aws_route53_zone.marlowe_finance_io_zone[0].zone_id - name = "run.marlowe-finance.io" - type = "CNAME" - ttl = 300 - records = ["production.marlowe-dash.iohkdev.io"] -} - -resource "aws_route53_record" "marlowe_finance_webinar" { - count = (var.env == "production" ? 1 : 0) - zone_id = aws_route53_zone.marlowe_finance_io_zone[0].zone_id - name = "webinar.marlowe-finance.io" - type = "CNAME" - ttl = 300 - records = ["wy8k2fnarz0v.wpeproxy.com"] -} - -# Bastion hosts -data "template_file" "bastion_user_data" { - template = "${file("${path.module}/templates/bastion_configuration.nix")}" - - vars = { - ssh_keys = "${join(" ", formatlist("\"command=\\\"echo 'this host is for forwarding only'\\\",no-X11-forwarding,no-user-rc %s\"", local.bastion_ssh_keys))}" - network_id = "canbeanything" - } -} - diff --git a/deployment/terraform/outputs.tf b/deployment/terraform/outputs.tf deleted file mode 100644 index fed322e886..0000000000 --- a/deployment/terraform/outputs.tf +++ /dev/null @@ -1,3 +0,0 @@ -output "region" { - value = "${var.aws_region}" -} diff --git a/deployment/terraform/playgrounds.tf b/deployment/terraform/playgrounds.tf deleted file mode 100644 index 078565a07f..0000000000 --- a/deployment/terraform/playgrounds.tf +++ /dev/null @@ -1,95 +0,0 @@ -# Security Group -resource "aws_security_group" "playgrounds" { - vpc_id = aws_vpc.plutus.id - name = "${local.project}_${var.env}_playgrounds" - - ## inbound (bastion hosts): ssh - ingress { - from_port = 22 - to_port = 22 - protocol = "TCP" - cidr_blocks = concat(var.public_subnet_cidrs, var.private_subnet_cidrs) - } - - ## inbound (world): http - - ingress { - from_port = 80 - to_port = 80 - protocol = "TCP" - cidr_blocks = concat(var.public_subnet_cidrs, var.private_subnet_cidrs) - } - - ingress { - from_port = local.plutus_playground_port - to_port = local.plutus_playground_port - protocol = "TCP" - cidr_blocks = concat(var.public_subnet_cidrs, var.private_subnet_cidrs) - } - - ingress { - 
from_port = local.marlowe_playground_port - to_port = local.marlowe_playground_port - protocol = "TCP" - cidr_blocks = concat(var.public_subnet_cidrs, var.private_subnet_cidrs) - } - - ingress { - from_port = local.marlowe_web_port - to_port = local.marlowe_web_port - protocol = "TCP" - cidr_blocks = concat(var.public_subnet_cidrs, var.private_subnet_cidrs) - } - - ## outgoing: all - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags = { - Name = "${local.project}_${var.env}_playgrounds" - Project = local.project - Environment = var.env - } -} - -data "template_file" "playgrounds_user_data" { - template = file("${path.module}/templates/default_configuration.nix") - - vars = { - root_ssh_keys = join(" ", formatlist("\"%s\"", local.root_ssh_keys)) - } -} - -resource "aws_instance" "playgrounds_a" { - ami = module.nixos_image.ami - - instance_type = var.playgrounds_instance_type - subnet_id = aws_subnet.private.*.id[0] - user_data = data.template_file.playgrounds_user_data.rendered - - vpc_security_group_ids = [ - aws_security_group.playgrounds.id, - ] - - root_block_device { - volume_size = "20" - } - - tags = { - Name = "${local.project}_${var.env}_playgrounds_a" - Project = local.project - Environment = var.env - } -} - -resource "aws_route53_record" "playgrounds_internal_a" { - zone_id = aws_route53_zone.plutus_private_zone.zone_id - type = "A" - name = "playgrounds-a.${aws_route53_zone.plutus_private_zone.name}" - ttl = 300 - records = [aws_instance.playgrounds_a.private_ip] -} diff --git a/deployment/terraform/ssh_config.tf b/deployment/terraform/ssh_config.tf deleted file mode 100644 index 275fd586eb..0000000000 --- a/deployment/terraform/ssh_config.tf +++ /dev/null @@ -1,60 +0,0 @@ -data "template_file" "ssh_config_section_webghc_a" { - template = file("${path.module}/templates/ssh-config") - - vars = { - full_hostname = "webghc-a.${aws_route53_zone.plutus_private_zone.name}" - short_hostname = 
"webghc-a.${local.project}" - ip = aws_instance.webghc_a.private_ip - bastion_hostname = aws_instance.bastion.*.public_ip[0] - user_name = "root" - } -} - -data "template_file" "ssh_config_section_marlowe_dash_a" { - template = file("${path.module}/templates/ssh-config") - - vars = { - full_hostname = "marlowe-dash-a.${aws_route53_zone.plutus_private_zone.name}" - short_hostname = "marlowe-dash-a.${local.project}" - ip = aws_instance.marlowe_dash_a.private_ip - bastion_hostname = aws_instance.bastion.*.public_ip[0] - user_name = "root" - } -} - -data "template_file" "ssh_config_section_playgrounds_a" { - template = file("${path.module}/templates/ssh-config") - - vars = { - full_hostname = "playgrounds-a.${aws_route53_zone.plutus_private_zone.name}" - short_hostname = "playgrounds-a.${local.project}" - ip = aws_instance.playgrounds_a.private_ip - bastion_hostname = aws_instance.bastion.*.public_ip[0] - user_name = "root" - } -} - -data "template_file" "ssh_config" { - template = < ]; - ec2.hvm = true; - - users.users.bastion = - { - isNormalUser = true; - home = "/home/bastion"; - description = "Bastion SSH User"; - extraGroups = [ ]; - openssh.authorizedKeys.keys = [ ${ssh_keys} ]; - }; - - services.fail2ban.enable = true; - - environment.systemPackages = [ ]; - boot.kernel.sysctl = { - "net.ipv4.ip_forward" = 1; - }; - networking.firewall = { - enable = true; - allowedTCPPorts = [ 22 ]; - }; -} diff --git a/deployment/terraform/templates/default_configuration.nix b/deployment/terraform/templates/default_configuration.nix deleted file mode 100644 index c9cbc3ade7..0000000000 --- a/deployment/terraform/templates/default_configuration.nix +++ /dev/null @@ -1,20 +0,0 @@ -{ config, pkgs, ... 
}: -{ - imports = [ ]; - ec2.hvm = true; - services.fail2ban.enable = true; - users.extraUsers.root.openssh.authorizedKeys.keys = [ ${root_ssh_keys} ]; - - # we need to configure the binary caches here otherwise the - # initial morph deployment cannot substitute anything causing - # everything to be uploaded from the deployer machine - nix = { - binaryCaches = [ https://hydra.iohk.io https://cache.nixos.org ]; - requireSignedBinaryCaches = false; - trustedBinaryCaches = [ https://hydra.iohk.io ]; - binaryCachePublicKeys = [ - "hydra.iohk.io:f/Ea+s+dFdN+3Y/G+FDgSq+a5NEWhJGzdjvKNGv0/EQ=" - "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" - ]; - }; -} diff --git a/deployment/terraform/templates/prometheus_configuration.nix b/deployment/terraform/templates/prometheus_configuration.nix deleted file mode 100644 index cb0eb37760..0000000000 --- a/deployment/terraform/templates/prometheus_configuration.nix +++ /dev/null @@ -1,30 +0,0 @@ -{ config, pkgs, ... }: -{ - imports = [ ]; - ec2.hvm = true; - nix = { - nixPath = [ - "nixpkgs=https://github.com/NixOS/nixpkgs/archive/5272327b81ed355bbed5659b8d303cf2979b6953.tar.gz" - "nixos-config=/etc/nixos/configuration.nix" - ]; - binaryCaches = [ https://hydra.iohk.io https://cache.nixos.org ]; - requireSignedBinaryCaches = false; - extraOptions = '' - build-cores = 8 - auto-optimise-store = true - ''; - trustedBinaryCaches = [ https://hydra.iohk.io ]; - binaryCachePublicKeys = [ - "hydra.iohk.io:f/Ea+s+dFdN+3Y/G+FDgSq+a5NEWhJGzdjvKNGv0/EQ=" - "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" - ]; - }; - - users.extraUsers.root.openssh.authorizedKeys.keys = [ ${ssh_keys} ]; - - environment.systemPackages = with pkgs; - [ vim tmux git ]; - - services.fail2ban.enable = true; - -} diff --git a/deployment/terraform/templates/ssh-config b/deployment/terraform/templates/ssh-config deleted file mode 100644 index 33c650aa16..0000000000 --- a/deployment/terraform/templates/ssh-config +++ /dev/null @@ -1,15 
+0,0 @@ -Host ${full_hostname} - User ${user_name} - ProxyJump bastion@${bastion_hostname} - StrictHostKeyChecking no - -Host ${short_hostname} - Hostname ${full_hostname} - User ${user_name} - ProxyJump bastion@${bastion_hostname} - StrictHostKeyChecking no - -Host ${ip} - User ${user_name} - ProxyJump bastion@${bastion_hostname} - StrictHostKeyChecking no diff --git a/deployment/terraform/variables.tf b/deployment/terraform/variables.tf deleted file mode 100644 index 63b36f07ea..0000000000 --- a/deployment/terraform/variables.tf +++ /dev/null @@ -1,89 +0,0 @@ -# This file contains values that need to be changed at runtime. E.g. `env` and `aws_region` need to be set before running terraform apply -variable "aws_region" {} - -variable "env" {} - -variable "output_path" { -} - -variable "plutus_tld" { - default = "plutus.iohkdev.io" -} - -variable "plutus_full_domain" { - default = "" -} - -variable "plutus_public_zone" { - default = "ZBC2AQBA8QH4G" -} - -variable "marlowe_tld" { - default = "marlowe.iohkdev.io" -} - -variable "marlowe_full_domain" { - default = "" -} - -variable "marlowe_public_zone" { - default = "Z1VIYCTCY8RMLZ" -} - -variable "marlowe_dash_tld" { - default = "marlowe-dash.iohkdev.io" -} - -variable "marlowe_web_public_zone" { - default = "Z09016162N4S3NFVWHXYP" -} - -variable "marlowe_web_tld" { - default = "marlowe-web.iohkdev.io" -} - -variable "marlowe_dash_public_zone" { - default = "Z04600362E06M9P9U3Y12" -} - -variable "marlowe_finance_io_public_zone" { - default = "Z005888925CITFPGLQVVQ" -} - -variable "bastion_instance_type" { - default = "t3.micro" -} - -variable "webghc_instance_type" { - default = "t3.large" -} - -variable "playgrounds_instance_type" { - default = "t3.small" -} - -variable "marlowe_dash_instance_type" { - default = "t3.small" -} - -variable "vpc_cidr" { - default = "10.0.0.0/16" -} - -variable "public_subnet_cidrs" { - default = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] -} - -variable "private_subnet_cidrs" { - 
default = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] -} - -# The public ip address of production.marlowe.iohkdev.io which -# is used to create a route53 A record for marlowe-finance.io. -variable "marlowe_finance_production_ip" { - default = "52.213.243.4" -} - -variable "azs" { - default = ["a", "b"] -} diff --git a/deployment/terraform/webghc.tf b/deployment/terraform/webghc.tf deleted file mode 100644 index 098899fa96..0000000000 --- a/deployment/terraform/webghc.tf +++ /dev/null @@ -1,73 +0,0 @@ -# Security Group -resource "aws_security_group" "webghc" { - vpc_id = aws_vpc.plutus.id - name = "${local.project}_${var.env}_webghc" - - ingress { - from_port = 22 - to_port = 22 - protocol = "TCP" - cidr_blocks = concat(var.public_subnet_cidrs, var.private_subnet_cidrs) - } - - ## inbound (world): http - - ingress { - from_port = 80 - to_port = 80 - protocol = "TCP" - cidr_blocks = concat(var.public_subnet_cidrs, var.private_subnet_cidrs) - } - - ## outgoing: all - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags = { - Name = "${local.project}_${var.env}_webghc" - Project = local.project - Environment = var.env - } -} - -data "template_file" "webghc_user_data" { - template = file("${path.module}/templates/default_configuration.nix") - - vars = { - root_ssh_keys = join(" ", formatlist("\"%s\"", local.root_ssh_keys)) - } -} - -resource "aws_instance" "webghc_a" { - ami = module.nixos_image.ami - - instance_type = var.webghc_instance_type - subnet_id = aws_subnet.private.*.id[0] - user_data = data.template_file.webghc_user_data.rendered - - vpc_security_group_ids = [ - aws_security_group.webghc.id, - ] - - root_block_device { - volume_size = "20" - } - - tags = { - Name = "${local.project}_${var.env}_webghc_a" - Project = local.project - Environment = var.env - } -} - -resource "aws_route53_record" "webghc_internal_a" { - zone_id = aws_route53_zone.plutus_private_zone.zone_id - type = "A" - name = 
"webghc-a.${aws_route53_zone.plutus_private_zone.name}" - ttl = 300 - records = [aws_instance.webghc_a.private_ip] -} diff --git a/doc/README.md b/doc/README.md index 08c33da56d..bbaeefefdb 100644 --- a/doc/README.md +++ b/doc/README.md @@ -12,5 +12,5 @@ Or you can build it with Nix at the top level, which will also build the Haddock nix build -f default.nix docs.site ``` -The doc site from master is built automatically and hosted [here](https://plutus.readthedocs.io/en/latest). +The doc site from main is built automatically and hosted [here](https://plutus.readthedocs.io/en/latest). Additionally, the site is built for all PRs, and a link to a preview can be found in the PR statuses. diff --git a/extended-utxo-spec/ARCHITECTURE.adoc b/extended-utxo-spec/ARCHITECTURE.adoc deleted file mode 100644 index 05ceafe853..0000000000 --- a/extended-utxo-spec/ARCHITECTURE.adoc +++ /dev/null @@ -1,5 +0,0 @@ -=== `extended-utxo-spec` - -This folder contains the Extended UTXO model specification. - -NOTE: This is more of a design document, really, it's not aiming for full precision. 
\ No newline at end of file diff --git a/extended-utxo-spec/Makefile b/extended-utxo-spec/Makefile deleted file mode 100644 index 39f1e69565..0000000000 --- a/extended-utxo-spec/Makefile +++ /dev/null @@ -1,46 +0,0 @@ -DOC=extended-utxo-specification - -PDF=${DOC}.pdf -BIB=${DOC}.bib - -FIGS=./figures - -SRC = ${DOC}.tex ${BIB} - - -LATEX = pdflatex -halt-on-error -shell-escape # To get pstricks to work with PDF -BIBTEX = bibtex - -.PHONEY: all pdf figs again clean - -#---------------------------------------------------------------- - -all: pdf - -pdf: ${PDF} - -${DOC}.pdf: ${SRC} - rm -f ${DOC}.bbl - ${LATEX} ${DOC} - ${BIBTEX} ${DOC} - ${LATEX} ${DOC} # to make sure the (cross)references are correct - ${LATEX} ${DOC} - ${LATEX} ${DOC} - -figs: - cd ${FIGS} && ${MAKE} - -#---------------------------------------------------------------- -again: - touch ${DOC}.tex && ${MAKE} - -clean1: - rm -f *.aux *.log *.cov *.par *.bbl *.fls *.blg *.xml *.bcf *.dvi *~ - cd ${FIGS} && ${MAKE} clean - -clean: clean1 - rm -f ${DOC}.pdf - -v: ${PDF} - acroread ${PDF} 2>/dev/null - diff --git a/extended-utxo-spec/default.nix b/extended-utxo-spec/default.nix deleted file mode 100644 index 4f430a3428..0000000000 --- a/extended-utxo-spec/default.nix +++ /dev/null @@ -1,7 +0,0 @@ -{ buildLatexDoc }: - -buildLatexDoc { - name = "extended-utxo-spec"; - src = ./.; - description = "Extended UTXO specification"; -} diff --git a/extended-utxo-spec/extended-utxo-specification.bib b/extended-utxo-spec/extended-utxo-specification.bib deleted file mode 100644 index 9b2cc8b913..0000000000 --- a/extended-utxo-spec/extended-utxo-specification.bib +++ /dev/null @@ -1,113 +0,0 @@ -@misc{Cardano, -title = {Cardano}, -howpublished={\url{https://www.cardano.org/en/home/}}, -year = {2015-2019} -} - -@article{Zahnentferner18-Chimeric, - author = {Joachim Zahnentferner}, - title = {Chimeric Ledgers: Translating and Unifying {UTxO}-based and Account-based - Cryptocurrencies}, - journal = {{IACR} Cryptology 
ePrint Archive}, - volume = {2018}, - pages = {262}, - year = {2018}, - url = {http://eprint.iacr.org/2018/262}, - timestamp = {Tue, 14 Aug 2018 17:08:26 +0200}, - biburl = {https://dblp.org/rec/bib/journals/iacr/Zahnentferner18}, - bibsource = {dblp computer science bibliography, https://dblp.org} -} - - -@article{Zahnentferner18-UTxO, - author = {Joachim Zahnentferner}, - title = {An Abstract Model of {UTxO}-based Cryptocurrencies with Scripts}, - journal = {{IACR} Cryptology ePrint Archive}, - volume = {2018}, - pages = {469}, - year = {2018}, - url = {https://eprint.iacr.org/2018/469}, - timestamp = {Tue, 14 Aug 2018 17:08:11 +0200}, - biburl = {https://dblp.org/rec/bib/journals/iacr/Zahnentferner18a}, - bibsource = {dblp computer science bibliography, https://dblp.org} -} - - -@misc{Bitcoin-script, -title={{Bitcoin Script} reference guide}, -year={2013--2018}, -howpublished={\url{https://en.bitcoin.it/wiki/Script}} -} - -@misc{Nakamoto, -title={{Bitcoin: A Peer-to-Peer Electronic Cash System}}, -author={S. Nakamoto}, -month={October}, -year={2008}, -howpublished={\url{https://bitcoin.org/en/bitcoin-paper}} -} - -@misc{Solidity, -title={{Solidity documentation}}, -year = {2016--2019}, -howpublished={\url{https://solidity.readthedocs.io/}} -} - -@misc{ERC-721, -title={{ERC-721 standard for non-fungible tokens in Ethereum}}, -author= {Ethereum}, -year = {2017}, -howpublished={\url{http://erc721.org/}} -} -% Who exactly should the author be? 
- -@inproceedings{Simplicity, - author = {Russel O'Connor}, - title = {Simplicity: A New Language for Blockchains}, - booktitle = {Proceedings of the 2017 Workshop on Programming Languages and Analysis for Security}, - year = {2017} -} - - -@book{Narayanan-bitcoin, - author = {Narayanan, Arvind and Bonneau, Joseph and Felten, Edward and Miller, Andrew and Goldfeder, Steven}, - title = {Bitcoin and Cryptocurrency Technologies: A Comprehensive Introduction}, - year = {2016}, - isbn = {0691171696, 9780691171692}, - publisher = {Princeton University Press}, - address = {Princeton, NJ, USA}, -} - -@techreport{Cardano-wallet-spec, -author = {Duncan Coutts and Edsko de Vries}, -title = {Formal specification for a {Cardano} wallet}, -note={Available at \url{https://cardanodocs.com/files/formal-specification-of-the-cardano-wallet.pdf}}, -institution={IOHK}, -year={2018} -} - -@techreport{Cardano-ledger-spec, -title = {A Formal Specification of the {Cardano} Ledger}, -author={Jared Corduan and Polina Vinogradova and Matthias G{\"u}demann}, -note={Available at \url{https://github.com/input-output-hk/cardano-ledger-specs}}, -institution={IOHK}, -year={2019} -} - - -@techreport{Plutus-Core-spec, -title = {{Formal Specification of the Plutus Core Language}}, -note={Available at \url{https://github.com/input-output-hk/plutus/tree/master/plutus-core-spec}}, -author={IOHK}, -institution={IOHK}, -year={2019} -} - - -@book{Plutus-book, -title={{Plutus: Writing Reliable Smart Contracts}}, -author={Lars Br{\"u}njes and Polina Vinogradova}, -note={Available at \url{https://github.com/input-output-hk/plutus/tree/master/plutus-book}}, -institution={IOHK}, -year={2019} -} \ No newline at end of file diff --git a/extended-utxo-spec/extended-utxo-specification.tex b/extended-utxo-spec/extended-utxo-specification.tex deleted file mode 100644 index d9cb9220ee..0000000000 --- a/extended-utxo-spec/extended-utxo-specification.tex +++ /dev/null @@ -1,1395 +0,0 @@ -%% Extended UTXO Specification 
-\newcommand\version{-1} - -\title{The Extended UTXO Ledger Model} - -\pagestyle{plain} -\date{18th April 2020} - -\author{} - -\documentclass[a4paper]{article} - -% correct bad hyphenation here -\hyphenation{} - -\usepackage{natbib} -\usepackage{url} - -% *** MATHS PACKAGES *** -% -\usepackage[cmex10]{amsmath} -\usepackage{amssymb} -\usepackage{stmaryrd} -\usepackage{amsthm} - -% *** ALIGNMENT PACKAGES *** -% -\usepackage{array} -\usepackage{float} %% Try to improve placement of figures. Doesn't work well with subcaption package. -\usepackage{subcaption} -\usepackage{caption} - -\usepackage{subfiles} -\usepackage{geometry} -\usepackage{listings} -\usepackage[dvipsnames]{xcolor} -\usepackage{verbatim} -\usepackage{alltt} - -%\usepackage{todonotes} -\usepackage[disable]{todonotes} - -% These have to go at the end of the packages. -\usepackage[colorlinks=true,linkcolor=MidnightBlue,citecolor=ForestGreen,urlcolor=Plum]{hyperref} -\usepackage[capitalise, noabbrev]{cleveref} - -% Mild hack to get cleveref to refer to things as "Rules": we alias the -% enumerate counters to "rule". This would be annoying if we ever wanted to -% refer to items or lists as something other than "rule", but we don't. -\crefname{rule}{rule}{rules} -\crefname{rule}{Rule}{Rules} -\crefalias{enumi}{rule} -\crefalias{enumii}{rule} - -% Stuff for splitting figures over page breaks -%\DeclareCaptionLabelFormat{continued}{#1~#2 (Continued)} -%\captionsetup[ContinuedFloat]{labelformat=continued} - -% *** MACROS *** - -% A command for making notes with bold titles and an independent -% numbering system. This is used for a list of notes in the appendix. -\newcounter{note} -\newcommand{\note}[1]{ - \bigskip - \refstepcounter{note} - \noindent\textbf{Note \thenote. 
#1} -} - -\newcommand{\todochak}[1]{\todo[inline,color=purple!40,author=chak]{#1}} -\newcommand{\todompj}[1]{\todo[inline,color=yellow!40,author=Michael]{#1}} -\newcommand{\todokwxm}[1]{\todo[inline,color=blue!20,author=kwxm]{#1}} -\newcommand{\todojm}[1]{\todo[inline,color=purple!40,author=Jann]{#1}} - -\newcommand{\red}[1]{\textcolor{red}{#1}} -\newcommand{\redfootnote}[1]{\red{\footnote{\red{#1}}}} -\newcommand{\blue}[1]{\textcolor{blue}{#1}} -\newcommand{\bluefootnote}[1]{\blue{\footnote{\blue{#1}}}} - -%% A version of ^{\prime} for use in text mode -\makeatletter -\DeclareTextCommand{\textprime}{\encodingdefault}{% - \mbox{$\m@th'\kern-\scriptspace$}% -} -\makeatother - -\renewcommand{\i}{\textit} % Just to speed up typing: replace these in the final version -\renewcommand{\t}{\texttt} % Just to speed up typing: replace these in the final version -\newcommand{\s}{\textsf} % Just to speed up typing: replace these in the final version -\newcommand{\msf}[1]{\ensuremath{\mathsf{#1}}} -\newcommand{\mi}[1]{\ensuremath{\mathit{#1}}} - -%% A figure with rules above and below. -\newcommand\rfskip{7pt} -\newenvironment{ruledfigure}[1]{\begin{figure}[#1]\hrule\vspace{\rfskip}}{\vspace{\rfskip}\hrule\end{figure}} - -%% Various text macros -\newcommand{\true}{\textsf{true}} -\newcommand{\false}{\textsf{false}} - -\newcommand{\hash}[1]{\ensuremath{#1^{\#}}} - -\newcommand{\List}[1]{\ensuremath{\s{List}[#1]}} -\newcommand{\Set}[1]{\ensuremath{\s{Set}[#1]}} -\newcommand{\FinSet}[1]{\ensuremath{\s{FinSet}[#1]}} -\newcommand{\Interval}[1]{\ensuremath{\s{Interval}[#1]}} -\newcommand{\FinSup}[2]{\ensuremath{\s{FinSup}[#1,#2]}} -\newcommand{\support}{\msf{support}} - -\newcommand{\script}{\ensuremath{\s{Script}}} -\newcommand{\scriptAddr}{\msf{scriptAddr}} -\newcommand{\ctx}{\ensuremath{\s{Context}}} -\newcommand{\toData}{\ensuremath{\s{toData}}} -\newcommand{\toTxData}{\ensuremath{\s{toTxData}}} -\newcommand{\mkContext}{\ensuremath{\s{mkContext}}} - -% Macros for eutxo things. 
-\newcommand{\TxId}{\ensuremath{\s{TxId}}} -\newcommand{\txId}{\msf{txId}} -\newcommand{\txrefid}{\mi{id}} -\newcommand{\Address}{\ensuremath{\s{Address}}} -\newcommand{\DataHash}{\ensuremath{\s{DataHash}}} -\newcommand{\hashData}{\msf{dataHash}} -\newcommand{\idx}{\mi{index}} -\newcommand{\inputs}{\mi{inputs}} -\newcommand{\outputs}{\mi{outputs}} -\newcommand{\forge}{\mi{forge}} -\newcommand{\forgeScripts}{\mi{forgeScripts}} -\newcommand{\fee}{\mi{fee}} -\newcommand{\addr}{\mi{addr}} -\newcommand{\val}{\mi{value}} %% \value is already defined - -\newcommand{\validator}{\mi{validator}} -\newcommand{\redeemer}{\mi{redeemer}} -\newcommand{\datum}{\mi{datum}} -\newcommand{\datumHash}{\mi{datumHash}} -\newcommand{\datumWits}{\mi{datumWitnesses}} -\newcommand{\Data}{\ensuremath{\s{Data}}} - -\newcommand{\outputref}{\mi{outputRef}} -\newcommand{\txin}{\mi{in}} -\newcommand{\id}{\mi{id}} -\newcommand{\lookupTx}{\msf{lookupTx}} -\newcommand{\getSpent}{\msf{getSpentOutput}} - -\newcommand{\slotnum}{\ensuremath{\s{SlotNumber}}} -\newcommand{\spent}{\msf{spentOutputs}} -\newcommand{\unspent}{\msf{unspentOutputs}} -\newcommand{\txunspent}{\msf{unspentTxOutputs}} -\newcommand{\eutxotx}{\msf{Tx}} - -\newcommand{\qty}{\ensuremath{\s{Quantity}}} -\newcommand{\token}{\ensuremath{\s{Token}}} -\newcommand{\currency}{\ensuremath{\s{CurrencyId}}} -\newcommand{\nativeCur}{\ensuremath{\mathrm{nativeC}}} -\newcommand{\nativeTok}{\ensuremath{\mathrm{nativeT}}} - -\newcommand{\qtymap}{\ensuremath{\s{Quantities}}} - -\newcommand{\applyScript}[1]{\ensuremath{\llbracket#1\rrbracket}} -\newcommand{\applyMPScript}[1]{\ensuremath{\llbracket#1\rrbracket}} - -\newcommand\B{\ensuremath{\mathbb{B}}} -\newcommand\N{\ensuremath{\mathbb{N}}} -\newcommand\Z{\ensuremath{\mathbb{Z}}} -\renewcommand\H{\ensuremath{\mathbb{H}}} -%% \H is usually the Hungarian double acute accent -\newcommand{\emptyBs}{\ensuremath{\emptyset}} - -\newcommand{\emptymap}{\ensuremath{\{\}}} - - -%% ------------- Start of document 
------------- %% - -\begin{document} -\maketitle - -\section{Introduction: The Extended UTXO Model} -\label{sec:intro} -The Cardano blockchain~\citep{Cardano, Cardano-ledger-spec} uses a -variant of the \textit{Unspent Transaction Output} (UTXO) model used -by Bitcoin. Transactions consume \textit{unspent outputs} (UTXOs) -from previous transactions and produce new outputs which can be used -as inputs to later transactions. Unspent outputs are the liquid funds -on the blockchain. Users do not have individual accounts, but rather -have a software \textit{wallet} on a smartphone or PC which manages -UTXOs on the blockchain and can initiate transactions involving UTXOs -owned by the user. Every core node on the blockchain maintains a -record of all of the currently unspent outputs, the \textit{UTXO set}; -when outputs are spent, they are removed from the UTXO set. - -This document contains a description of some extensions of the -UTXO model: the main aim of these extensions is to facilitate the -implementation of \textit{smart contracts}, programs which perform -automated and irrevocable transfer of funds on the blockchain, subject -to certain conditions being met. A smart contract may involve -multiple transactions, and our aim is to define a transaction model -which enables the implementation of highly expressive contracts. - -An important feature of our UTXO models is \textit{scripts}, -programs which run on the blockchain to check the validity of -transactions. In Cardano, scripts will be programs in the Plutus Core -language~\citep{Plutus-Core-spec}. The Extended UTXO models are largely -agnostic as to the scripting language. - - -\subsection{Structure of the document} -\label{sec:doc-structure} -The papers~\citep{Zahnentferner18-Chimeric} and -\citep{Zahnentferner18-UTxO} give a formal specification of a basic -UTXO model. See \cref{note:basic-utxo} for some background on -this model. 
- -\medskip -\noindent This document proposes two extensions of the basic UTXO -model (EUTXO stands for \textit{Extended UTXO}): - -\begin{itemize} - \item \textbf{EUTXO-1} (\cref{sec:eutxo-1}): this extends the - basic UTXO model with enhanced scripting features, allowing the - implementation of complex smart contracts. - \item \textbf{EUTXO-2} (\cref{sec:eutxo-2}): this adds - multicurrency features to EUTXO-1, allowing users to define - \textit{custom currencies} and \textit{non-fungible tokens}. -\end{itemize} - - -\medskip - -The rationale for providing two separate extensions is that (1) -introducing the extensions separately clarifies the structure of the -models and makes it easier to explain the relevant design decisions, -and (2) it is possible that a particular blockchain might not need the -full power of EUTXO-2 and so could use the simpler EUTXO-1 model, -perhaps with less computational overhead. - -\medskip - -For ease of reference we have kept exposition to a minimum in the main -text. Some aspects of the models are explained in more detail -in \cref{appendix:comments}, with cross-references in the main -text. Further explanation and many examples are contained in the -book~\citep{Plutus-book}. - - -\section{Notation} -This section defines some basic notation. We generally follow the -notation established by \citep{Zahnentferner18-UTxO}, except that we make use -of finitely-supported functions in most places that \citep{Zahnentferner18-UTxO} -use maps. - -\subsection{Basic types and operations} -\label{sec:basic-notation} - -This section describes some types, notation, and -conventions used in the remainder of the document. - -\begin{itemize} -\item Types are typeset in $\mathsf{sans~serif}$. - -\item \B{} denotes the type of booleans, $\{\false, \true\}$. -\item \N{} denotes the type of natural numbers, $\{0, 1, 2, \ldots\}$. -\item \Z{} denotes the type of integers, $\{\ldots, -2, -1, 0, 1, 2, \ldots\}$. 
-\item We regard $\N$ as a subtype of $\Z$ and convert freely between - natural numbers and non-negative integers. -\item \H{} denotes the type of bytestrings, - $\bigcup_{n=0}^{\infty}\{0,1\}^{8n}$. - - \emptyBs{} denotes the empty bytestring. - - A bytestring is a sequence of 8-bit bytes: the symbol $\H$ is used because bytestrings are often - presented as sequences of hexadecimal digits. - -\item If a type $M$ is a monoid, we use $+$ for the monoidal operation and $0$ - for the unit of the monoid. - - If $M$ is a commutative monoid, we use $\sum$ for the extension of $+$ to a finite set of elements of - type $M$. - - If $M$ is a group, we use $-$ for the group - inverse operation. - - This should never be ambiguous. - -\item A record type with fields $\phi_1, \ldots, \phi_n$ of types $T_1, - \ldots, T_n$ is denoted by $(\phi_1 : T_1, \ldots, \phi_n : T_n)$. - - If $t$ is a value of a record type $T$ and $\phi$ is the name - of a field of $T$ then $t.\phi$ denotes the value of $\phi$ for - $t$. - -\item If $T$ is a type then $\FinSet{T}$ is the type of finite sets - with elements of type $T$. - -\item A list $l$ of type $\List{T}$ is either the empty list - $[]$ or a list $e :: l$ with $head$ $e$ of type $T$ and - $tail$ $l$ of type $\List{T}$. A list has only a finite - number of elements. We denote the $i$th element of a list $l$ by - $l[i]$ and the length of $l$ by $\left|l\right|$. - -\item $x \mapsto f(x)$ denotes an anonymous function. - -\item A cryptographic collision-resistant hash of a value $c$ is denoted $\hash{c}$. - -\item For a type $A$ which forms a total order, $\Interval{A}$ is the - type of intervals over that type. Intervals may be bounded or - unbounded, and open or closed at either end. The type $\Interval{A}$ - forms a lattice under inclusion. -\end{itemize} - -\subsection {Finitely-supported functions} -\label{sec:fsfs} - -Finitely-supported functions are a generalisation of maps to monoidal values. 
-They always return an answer (which will in all but finitely many cases be -zero), and can be queried for the set of non-zero points in their domain. - -For two types $K$ and $V$ where $V$ is a monoid, $\FinSup{K}{V}$ denotes the type of -\textit{finitely-supported functions} from $K$ to $V$. That is, there is a -function $\support : \FinSup{K}{V} \rightarrow \FinSet{K}$ such that -$k \in \support(f) \Leftrightarrow f(k) \neq 0$. - -Equality on finitely-supported functions is defined as pointwise equality. Similarly, -if $V$ has a partial order, then a partial order on finitely-supported functions -is also defined pointwise. - -If the type $M$ is a monoid then we define the sum of two finitely-supported -functions -$f, g \in \FinSup{K}{M}$ to be the function $f+g \in \FinSup{K}{M}$ given by -\[(f+g)(k) = f(k) + g(k) \] -Note that the type $\FinSup{K}{M}$ is a monoid with this -operation, and the empty function as -identity element. - -If the type $M$ is a group, then we can -similarly define the inverse of a finitely-supported function $f$ as -the function $(-f)$ with the same support, given by -\[ (-f)(k) = -f(k) \] -Again, $\FinSup{K}{M}$ is a group with this operation. - -\smallskip - -See \cref{note:finitely-supported-functions} for discussion of using -finitely-supported functions computationally. - -\subsection{The \Data{} type} -We also define a type \Data{} which can be used to pass information -into scripts in a type-safe manner: see \cref{fig:data-defn}. The -definition is given here in EBNF form, but can easily be translated to -a Haskell type, for instance. 
- -\begin{ruledfigure}{H} -\begin{alltt} - \Data = - "I" \(\Z\) - | "B" \(\H\) - | "Constr" \(\N (\List{\Data})\) - | "List" \(\List{\Data}\) - | "Map" \(\List{\Data\times\Data}\) -\end{alltt} -\caption{The \Data{} type} -\label{fig:data-defn} -\end{ruledfigure} - -\noindent Thus values of type \Data{} are nested sums and products -built up recursively from the base types of integers and -bytestrings. This allows one to encode a large variety of first-order -data structures: for example, we could encode values of Haskell's -\verb|Maybe Integer| type using \verb|Constr 0 []| to represent -\verb|Nothing| and \verb|Constr 1 [I 41]| to encode \verb|Just 41|. - - -The \texttt{List} and \texttt{Map} constructors are strictly -redundant, but are included for convenience to allow straightforward -encoding of lists and records. - -We assume that the scripting language has the ability to parse values -of type \Data{}, converting them into a suitable internal representation. - -\section{EUTXO-1: Enhanced scripting} -\label{sec:eutxo-1} -The EUTXO-1 model adds the following new features to the model -proposed in~\citep{Zahnentferner18-UTxO}: - -\begin{itemize} -\item Every transaction has a \textit{validity interval}, of type $\Interval{\slotnum}$. - A core node will only process the transaction if - the current slot number lies within the transaction's validity - interval. - -\item The redeemer script of~\citet{Zahnentferner18-UTxO} has been - replaced with a \textit{redeemer object} (\textit{redeemer} for short) of - type \Data{}. - -\item Each unspent output now has an object of type \Data{} associated - with it: we call this the output's \textit{datum} (or occasionally - \emph{datum object}) (see \cref{note:datum}). Only the hash - $\datumHash$ of the datum is stored in the output: the full value - must be provided when the output is spent, much like the validator. 
-
-\item Validator scripts make use of information about the pending
-  transaction (i.e., the transaction which is just about to take place,
-  assuming that validation succeeds). This information is contained in
-  a structure which we call \ctx{} (see \cref{sec:context} for its
-  definition). We may refer to this information as the
-  \textit{validation context} in cases where ambiguity may arise.
-
-\item Validation of an output is performed by running the validator
-  with three inputs:
-  \begin{enumerate}
-  \item the datum,
-  \item the redeemer,
-  \item the \ctx{} information, encoded as \Data{}.
-  \end{enumerate}
-
-\end{itemize}
-
-\subsection{A Formal Description of the EUTXO-1 Model}
-\label{section:eutxo-spec}
-
-In this section we give a formal description of the EUTXO-1 model. The
-description is given in a straightforward set-theoretic form, which
-(a) admits an almost direct translation into Haskell, and (b) should
-easily be amenable to mechanical formalisation. This will potentially
-allow us to argue formally about smart contracts and to develop tools
-for automatic contract analysis.
-
-The definitions in this section are essentially the definitions of
-UTXO-based cryptocurrencies with scripts from
-\citep{Zahnentferner18-UTxO}, except that we have added the new
-features mentioned above (the validity interval, the datum
-and the \ctx{} structure), changed the type of the redeemer from
-\script{} to \Data{}, and used finitely-supported functions in place
-of maps.
-
-\Cref{fig:eutxo-1-types} lists the types and operations used in
-the basic EUTXO model. Some of these are defined; the others must be provided by
-the ledger.
-%% -\begin{ruledfigure}{H} - \begin{displaymath} - \begin{array}{rll} - \multicolumn{3}{l}{\textsc{Ledger primitives}}\\[4pt] - \qty{} && \mbox{an amount of currency}\\ - \slotnum && \mbox{a slot number}\\ - \Address && \mbox{the ``address'' of a script in the blockchain}\\ - \DataHash && \mbox{the hash of an object of type \Data{}}\\ - \TxId && \mbox{the identifier of a transaction}\\ - \txId : \eutxotx \rightarrow \TxId && \mbox{a function computing the identifier of a transaction}\\ - \lookupTx : \s{Ledger} \times \TxId \rightarrow \eutxotx && \mbox{a function retrieving a transaction via its identifier}\\ - \script && \mbox{the (opaque) type of scripts}\\ - \scriptAddr : \script \rightarrow \Address && \mbox{the address of a script}\\ - \hashData : \Data \rightarrow \DataHash && \mbox{the hash of a data object}\\ - \applyScript{\cdot}: \script \rightarrow \Data \times \cdots \times - \Data \rightarrow \B && \mbox{application of a script to its arguments}\\ - \\ - \multicolumn{3}{l}{\textsc{Defined types}}\\[4pt] - \s{Output } &=&(\addr: \Address,\\ - & &\ \val: \qty,\\ - & &\ \datumHash: \DataHash)\\ - \\ - \s{OutputRef } &=&(\txrefid: \TxId, \idx: \s{Int})\\ - \\ - \s{Input } &=&(\outputref: \s{OutputRef},\\ - & &\ \validator: \script,\\ - & &\ \datum: \Data,\\ - & &\ \redeemer: \Data)\\ - \\ - \eutxotx\s{ } &=&(\inputs: \FinSet{\s{Input}},\\ - & &\ \outputs: \List{\s{Output}},\\ - & &\ \i{validityInterval}: \Interval{\slotnum},\\ - & &\ \datumWits: \FinSup{\DataHash}{\Data},\\ - & &\ \fee: \qty,\\ - & &\ \forge: \qty) \\ - \\ - \s{Ledger } &=&\!\List{\eutxotx}\\ - \end{array} - \end{displaymath} - \caption{Primitives and basic types for the EUTXO-1 model} - \label{fig:eutxo-1-types} -\end{ruledfigure} - -\subsubsection{Remarks} -\paragraph{ETUXO-1 on Cardano.} -The Cardano implementation of EUTXO-1 uses the primitives given in -\cref{fig:eutxo-1-types-cardano}. 
- -\begin{ruledfigure}{H} - \begin{displaymath} - \begin{array}{rll} - \qty{} &=& \Z\\ - \slotnum &=& \N\\ - \Address &=& \H\\ - \DataHash &=& \H\\ - \TxId &=& \H\\ - \txId : \eutxotx \rightarrow \TxId &=& t \mapsto \hash{t}\\ - \script & & \mbox{a Plutus Core program}\\ - \scriptAddr : \script \rightarrow \Address &=& s \mapsto \hash{s}\\ - \applyScript{\cdot} : \script \rightarrow \Data \times \cdots \times - \Data \rightarrow \B & & \mbox{running the Plutus Core interpreter with a}\\ - &&\mbox{script and a number of data objects as input}\\ - \end{array} - \end{displaymath} - \caption{Cardano primitives for the EUTXO-1 model} - \label{fig:eutxo-1-types-cardano} -\end{ruledfigure} - -\paragraph{Transaction identifiers.} We assume that -each transaction has a unique identifier (in Cardano, the hash of a -$\eutxotx$ object) and that a transaction can be efficiently retrieved -from a ledger using the $\lookupTx$ function. - -\paragraph{Inputs and outputs.} Note that a transaction has a -\textsf{Set} of inputs but a \textsf{List} of outputs. See -\cref{note:inputs-and-outputs} for a discussion of why this is. - -\paragraph{Validator addresses in outputs.} The \textit{addr} field -of an output should contain the address of the validator script for -that output: this requirement is enforced in -\cref{rule:validator-scripts-hash} of -\cref{fig:eutxo-1-validity} below. - -\paragraph{Scripts and hashes.} Note that datum objects and validators are -provided as parts of transaction inputs, even though they are -conceptually part of the output being spent. The reasons for -this are explained in \cref{note:scripts}. - -\paragraph{Applying scripts} A script $s$ may expect some number $n$ -of datum objects as arguments (the number $n$ depending on the type of -the script). The result of running the script with the datum objects -$d_1,\ldots, d_n$ as arguments is denoted by -$\applyScript{s}(d_1,\ldots,d_n)$. 
As mentioned at the start of this
-section, validator scripts take three arguments.
-
-\paragraph{Datum witnesses.} The transaction may include the full
-value of the datum for each output that it creates. See
-\cref{note:datum-witnesses} for more discussion.
-
-\paragraph{Fees.} Users are charged a fee for the on-chain storage
-and execution costs of a transaction, and this is included in the
-EUTXO models. The details are not important for the purposes of the
-models, but see \cref{note:fees} for some more discussion.
-
-\paragraph{Special types of transaction.} In a practical
-implementation it might be useful to include special cases for common
-transaction types such as pay-to-pubkey transactions in order to
-increase efficiency and decrease storage requirements (and hence
-reduce fees). These have been omitted from this model because it
-subsumes all of the other transaction types we're likely to encounter,
-and also because it's difficult to give a definitive list of such
-special cases.
-
-\paragraph{Ledger structure.} We model a ledger as a simple
-list of transactions: a real blockchain ledger will be more complex
-than this, but the only property that we really require is that
-transactions in the ledger have some kind of address which allows them
-to be uniquely identified and retrieved.
-
-\subsubsection{The \ctx{} type}
-\label{sec:context}
-Recall from the introduction to \cref{sec:eutxo-1} that when a
-transaction input is being validated, the validator is supplied
-with an object of type \ctx{} which contains information about the
-pending transaction. The \ctx{} type for the current version of
-EUTXO-1 is defined in \cref{fig:ptx-1-types}, along with some
-related types.
- -\begin{ruledfigure}{H} - \begin{displaymath} - \begin{array}{rll} - \s{OutputInfo } &=&(\val: \qty,\\ - & &\ \i{validatorHash}: \Address,\\ - & &\ \datumHash: \DataHash)\\ - \\ - \s{InputInfo } &=&(\outputref: \s{OutputRef},\\ - & &\ \i{validatorHash}: \Address,\\ - & &\ \i{datumHash}: \DataHash,\\ - & &\ \i{redeemerHash}: \DataHash,\\ - & &\ \val: \qty)\\ - \\ - \ctx\s{ } &=&(\i{inputInfo}: \List{\s{InputInfo}},\\ - & &\ \i{thisInput}: \N,\\ - & &\ \i{outputInfo}: \List{\s{OutputInfo}},\\ - & &\ \i{validityInterval}: \Interval{\slotnum},\\ - & &\ \datumWits: \FinSup{\DataHash}{\Data},\\ - & &\ \fee: \qty,\\ - & &\ \forge: \qty)\\ - \\ - \mkContext: \eutxotx \times \s{Input} \times \s{Ledger} \rightarrow \ctx - && \mbox{\parbox[t]{6cm}{summarises a transaction in the context of an input and a ledger state}}\\ - \\ - %% Without the break after the right arrow, the = sign is well - %% over halfway across the pages, which is horrible. This makes - %% it a bit better, but not much. - \toData: \ctx \rightarrow \Data & & \mbox{encodes a \ctx{} as \Data} - \end{array} - \end{displaymath} - \caption{The \ctx{} type for the EUTXO-1 model} - \label{fig:ptx-1-types} -\end{ruledfigure} - -\subsection{Remarks} -\paragraph{The contents of \ctx{}.} -The \ctx{} type is essentially a summary of the information contained -in the $\eutxotx$ type in \cref{fig:eutxo-1-types}. The \fee{}, -\forge{}, and \i{validityInterval} fields are copied directly from the -pending transaction. The \i{outputInfo} field contains information -about the outputs which will be produced if the pending transaction -validates successfully: it contains only the address of the relevant -validator, and the hash of the datum.\footnote{See - \cref{note:datum-objects-in-ptx} for further explanation.} The -\i{inputInfo} field contains information about the inputs to the -pending transaction, but provides only the hashes of the validators -and redeemers for the inputs. 
The \i{thisInput} field -is an index pointing to the element of \i{inputInfo} relating to the -input currently undergoing validation. -% Note: in the code at the moment, the hashes of the validator -% and redeemer scripts in inputInfo are allowed to be absent -% when we have pubkey inputs. We're ignoring that special case here. - -\paragraph{Defining \mkContext{} and \toData.} -Assuming we have an -appropriate hashing function, it is straightforward to define \mkContext. -For the implementation of \toData, note that the $inputs$ field is a -\FinSet{} in \eutxotx{}, but a \List{} in \ctx{}. Therefore \toData{} has to introduce -an ordering of the transaction inputs. Contract authors cannot make -any assumptions about this ordering and therefore should ensure that -their scripts pass or fail regardless of what particular permutation -of transaction inputs they are presented with. - -Apart from that, the function \toData{} is implementation-dependent and -we will not discuss it further. - -\paragraph{Determinism.} -The information provided in the \ctx{} -structure is sufficiently limited that the validation process -becomes \textit{deterministic}, which has important implications -for fee calculations. See \cref{note:validation-determinism} -for further discussion. - - -\subsection{Validity of EUTXO-1 transactions} -\label{sec:eutxo-1-validity} -A number of conditions must be satisfied in order for a transaction -$t$ to be considered valid with respect to a ledger $l$. - -\Cref{fig:validation-functions-1} defines some auxiliary functions used in validation. 
-\begin{ruledfigure}{H}
-  \begin{displaymath}
-    \begin{array}{lll}
-      \multicolumn{3}{l}{\txunspent : \eutxotx \rightarrow \FinSet{\s{OutputRef}}}\\
-      \txunspent(t) &=& \{(\txId(t),1), \ldots, (\txId(t),\left|t.outputs\right|)\}\\
-      \\
-      \multicolumn{3}{l}{\unspent : \s{Ledger} \rightarrow \FinSet{\s{OutputRef}}}\\
-      \unspent([]) &=& \emptymap \\
-      \unspent(t::l) &=& (\unspent(l) \setminus t.\inputs) \cup \txunspent(t)\\
-      \\
-      \multicolumn{3}{l}{\getSpent : \s{Input} \times \s{Ledger} \rightarrow \s{Output}}\\
-      \getSpent(i,l) &=& \lookupTx (l, i.\outputref.\id).\outputs[i.\outputref.\idx]
-    \end{array}
-  \end{displaymath}
-  \caption{Auxiliary functions for transaction validation}
-  \label{fig:validation-functions-1}
-\end{ruledfigure}
-
-It is perhaps not immediately obvious that the $\unspent$ function
-only yields a \textit{finite} set of outputs: however, this can be
-proved by induction on the slot number, using the facts that the
-initial ledger is empty and that each transaction only produces a
-finite number of outputs.
-
-Note also that $\getSpent$ uses the $\lookupTx$ function, which can of
-course fail if the ledger contains no transaction with the relevant
-identifier; however we only use $\getSpent$ during transaction
-validation, and our validity rules ensure that in that case
-transaction lookup will always succeed: see Note~\ref{note:tx-lookup-never-fails}.
-
-\medskip
-
-We can now define what it means for a transaction $t$ of type
-$\eutxotx$ to be valid for a ledger $l$ during the slot
-$\msf{currentSlot}$: see \cref{fig:eutxo-1-validity}. Our definition
-combines Definitions 6 and 14 from \citep{Zahnentferner18-UTxO},
-differing from the latter in \cref{rule:all-inputs-validate}.
- -\begin{ruledfigure}{H} -\begin{enumerate} - -\item - \label{rule:slot-in-range} - \textbf{The current slot is within the validity interval} - \begin{displaymath} - \msf{currentSlot} \in t.\i{validityInterval} - \end{displaymath} - -\item - \label{rule:all-outputs-are-non-negative} - \textbf{All outputs have non-negative values} - \begin{displaymath} - \textrm{For all } o \in t.\outputs,\ o.\val \geq 0 - \end{displaymath} - -\item - \label{rule:all-inputs-refer-to-unspent-outputs} - \textbf{All inputs refer to unspent outputs} - \begin{displaymath} - \{i.\outputref: i \in t.\inputs \} \subseteq \unspent(l). - \end{displaymath} - -\item - \label{rule:forging} - \textbf{Forging} - -%% Don't delete this blank line ^ - A transaction with a non-zero \forge{} - field is only valid if the ledger $l$ is empty (that - is, if it is the initial transaction). - -\item - \label{rule:value-is-preserved} - \textbf{Value is preserved} - \begin{displaymath} - t.\forge + \sum_{i \in t.\inputs} \getSpent(i, l).\val = t.\fee + \sum_{o \in t.\outputs} o.\val - \end{displaymath} - -\item - \label{rule:no-double-spending} - \textbf{No output is double spent} - \begin{displaymath} - \textrm{If } i_1, i_2 \in t.\inputs \textrm{ and } i_1.\outputref = i_2.\outputref - \textrm{ then } i_1 = i_2. - \end{displaymath} - -\item - \label{rule:all-inputs-validate} - \textbf{All inputs validate} - \begin{displaymath} - \textrm{For all } i \in t.\inputs,\ \applyScript{i.\validator}(i.\datum,\, i.\redeemer, - \, \toData(\mkContext(t,i,l))) = \true. 
- \end{displaymath} - -\item - \label{rule:validator-scripts-hash} - \textbf{Validator scripts match output addresses} - \begin{displaymath} - \textrm{For all } i \in t.\inputs,\ \scriptAddr(i.\validator) = \getSpent(i, l).\addr - \end{displaymath} - -\item - \label{rule:datum-objects-hash} - \textbf{Datum objects match output hashes} - \begin{displaymath} - \textrm{For all } i \in t.\inputs,\ \hashData(i.\datum) = \getSpent(i, l).\datumHash - \end{displaymath} - -\end{enumerate} -\caption{Validity of a transaction $t$ in the EUTXO-1 model} -\label{fig:eutxo-1-validity} -\end{ruledfigure} -\todokwxm{Do we really needs the $\applyScript{\cdots}$ business?} - - -\noindent We say that a ledger -$l$ is \textit{valid} if either $l$ is empty or -$l$ is of the form $t::l^{\prime}$ with -$l^{\prime}$ valid and $t$ valid for $l^{\prime}$. - -In practice, validity imposes a limit on the sizes of the -$\validator$ \script{}, the $\redeemer$ and -$\datum$ \Data{} fields, and the result of \toData. The validation of a -single transaction must take place within one slot, so the evaluation -of $\applyScript{\cdot}$ cannot take longer than one slot. -\todokwxm{Do we need this $\uparrow$?} - -%%\newpage -\section{EUTXO-2: multicurrency support and non-fungible tokens} -\label{sec:eutxo-2} -We now extend the EUTXO-1 model further by introducing features which -allow, among other things, the implementation of new currencies and -\textit{non-fungible tokens} (NFTs). - -\paragraph{Multiple currencies.} The EUTXO-2 model allows an unlimited -number of \textit{currencies}. Each custom currency has a unique -identifier and a \textit{monetary policy script} which may be used to -limit the way in which the currency is used (for example, by only -allowing specified users to create units of the currency). - -\todokwxm{We may wish to implement a DEX to enable exchange of custom - currencies. 
This is a little problematic in Ethereum because the - Ether currency itself does not conform to ERC-20 and so has to be - wrapped in another currency before it can participate in DEX trades. - Will the fact that Ada is treated differently from custom currencies - in EUTXO cause us difficulties here?} - -\paragraph{NFTs.} -A non-fungible token (NFT) is a unique object which can be transferred -to another user, but not duplicated. NFTs have proven useful in a -number of blockchain applications (see~\citep{ERC-721} for example); -for example, they can represent ownership of some object in a game. -We can implement NFTs as custom currencies whose supply is limited to -a single coin. - -\subsection{The definition of EUTXO-2} -In order to support these extensions, we introduce several new types. -Custom currencies are represented by unique \textit{currency - identifiers} and each currency has a number of \textit{tokens} which -partition each custom currency into a number of sub-currencies. The -basic idea is that ordinary currencies have a single token whose -sub-currency has an unlimited supply and NFTs have a number of tokens -with the sub-currency for each token limited to a supply of one. - -The changes to the basic EUTXO-1 types are quite simple: see -\cref{fig:eutxo-2-types}. We change the type of the $\val$ field in -the \s{Output} type to be \qtymap{}, representing values of all -currencies. We also change the type of the \forge{} field on -transactions to \qtymap{}, to allow the creation and destruction of -funds in all currencies; the supply of a currency can be reduced by -forging a negative amount of that currency, as in EUTXO-1. In -addition, transactions now have a set $\forgeScripts$ of monetary policy -scripts, each of which takes a single $\Data$ argument summarising the -current transaction; we assume that there is a function $\toTxData: -\eutxotx \rightarrow \Data$ which creates such objects. 
- - - - -\begin{ruledfigure}{H} - \begin{displaymath} - \begin{array}{rll} - \multicolumn{3}{l}{\textsc{Ledger primitives}}\\[4pt] - \token && \mbox{a type consisting of identifiers for individual tokens}\\ - \\ - \toTxData : \eutxotx \rightarrow \Data &~& \mbox{encode a transaction as $\Data$} - \\ - \medskip - \\ - \multicolumn{3}{l}{\textsc{Defined types}}\\[4pt] - \currency &=& \Address \enspace\mbox{(an identifier for a custom currency)}\\ - \\ - \qtymap &=& \FinSup{\currency}{\FinSup{\token}{\qty}}\\ - \\ - \s{Output}_2 &=&(\addr: \Address,\\ - & &\ \val: \qtymap\\ - & &\ \datumHash: \DataHash)\\ - \\ - \s{OutputRef}_2 &= &(\txrefid: \TxId, \idx: \s{Int})\\ - \\ - \s{Input}_2 &=&( \outputref: \sf{OutputRef}_2,\\ - & &\ \validator: \script,\\ - & &\ \datum: \Data,\\ - & &\ \redeemer: \Data)\\ - \\ - \eutxotx_2 &=&(\inputs: \FinSet{\s{Input}_2},\\ - & &\ \outputs: \List{\s{Output}_2},\\ - & &\ \i{validityInterval}: \Interval{\slotnum},\\ - & &\ \datumWits: \FinSup{\DataHash}{\Data},\\ - & &\ \fee: \qtymap,\\ - & &\ \forge: \qtymap,\\ - & &\ \forgeScripts: \FinSet{\script})\\ - \\ - \s{Ledger}_2 &=&\!\List{\eutxotx_2}\\ - \end{array} - \end{displaymath} - \caption{Extra primitives and basic types for the EUTXO-2 model} - \label{fig:eutxo-2-types} -\end{ruledfigure} - -\subsubsection{Remarks} -\paragraph{ETUXO-2 on Cardano.} -The Cardano implementation of EUTXO-2 uses the primitives given in -\cref{fig:eutxo-2-types-cardano}. -Cardano also defines an \emph{native currency} and \emph{native currency token}. -This allows defining a native currency that behaves as a simple \qty{}. This -is used in Fig~\ref{fig:cardano-fee-validity}. -\begin{ruledfigure}{H} - \begin{displaymath} - \begin{array}{rll} - \currency &=& \H\\ - \token &=& \H\\ - \nativeCur &=& \emptyBs\\ - \nativeTok &=& \emptyBs\\ - \end{array} - \end{displaymath} - \caption{Cardano primitives for the EUTXO-2 model} - \label{fig:eutxo-2-types-cardano} -\end{ruledfigure} - -\paragraph{\qtymap{}. 
}
-The \qtymap{} type represents a collection of funds from a
-number of currencies and their subcurrencies.
-
-\qtymap{} is a finitely-supported function \emph{to} another finitely-supported
-function. This is well-defined, since finitely-supported functions form a monoid.
-
-\subsection{The \ctx{} type for EUTXO-2}
-\label{sec:pendingtx-2}
-The \ctx{} type must also be updated for the EUTXO-2 model. All
-that is required is to replace \qty{} by \qtymap{} everywhere in
-\cref{fig:ptx-1-types} except for the \fee{} field, and to add the
-monetary policy scripts: for reference the details are given in
-\cref{fig:ptx-2-types}.
-\begin{ruledfigure}{H}
-  \begin{displaymath}
-    \begin{array}{rll}
-      \s{OutputInfo}_2\s{ } &=&(\val: \qtymap,\\
-      & &\ \i{validatorHash}: \Address,\\
-      & &\ \datumHash: \DataHash)\\
-      \\
-      \s{InputInfo}_2\s{ } &=& (\outputref: \s{OutputRef},\\
-      & &\ \i{validatorHash}: \Address,\\
-      & &\ \i{datumHash}: \DataHash,\\
-      & &\ \i{redeemerHash}: \DataHash,\\
-      & &\ \val: \qtymap)\\
-      \\
-      \ctx_2\s{ } &=&(\i{inputInfo}: \List{\s{InputInfo}_2},\\
-      & &\ \i{thisInput}: \N,\\
-      & &\ \i{outputInfo}: \List{\s{OutputInfo$_2$}},\\
-      & &\ \i{validityInterval}: \Interval{\slotnum},\\
-      & &\ \datumWits: \FinSup{\DataHash}{\Data},\\
-      & &\ \fee: \qtymap,\\
-      & &\ \forge: \qtymap,\\
-      & &\ \forgeScripts: \FinSet{\script})\\
-      \\
-      \mkContext_2: \eutxotx_2 \times \s{Input} \times \s{Ledger} \rightarrow \ctx_2 &&
-      \mbox{\parbox[t]{6cm}{summarises a transaction in the context of an input and a ledger state}}\\
-      \\
-      \toData_2: \ctx_2 \rightarrow \Data &~& \mbox{encodes a $\ctx_2$ object}\\
-    \end{array}
-  \end{displaymath}
-  \caption{The \ctx{} type for the EUTXO-2 model}
-  \label{fig:ptx-2-types}
-\end{ruledfigure}
-
-\subsection{Validity of EUTXO-2 transactions}
-\label{sec:eutxo-2-validity}
-
-\bigskip
-\noindent The validity conditions from
-\cref{fig:eutxo-1-validity} must also be updated to take account
-of multiple currencies. 
- -We can now adapt the definition of validity for EUTXO-1 -(\cref{fig:eutxo-1-validity}) to obtain a definition of validity for -EUTXO-2: see \cref{fig:eutxo-2-validity}. - -\begin{ruledfigure}{H} -\begin{enumerate} - -\item - \label{rule:slot-in-range-2} - \textbf{The current slot is within the validity interval} - \begin{displaymath} - \msf{currentSlot} \in t.\i{validityInterval} - \end{displaymath} - -\item - \label{rule:all-outputs-are-non-negative-2} - \textbf{All outputs have non-negative values} - \begin{displaymath} - \textrm{For all } o \in t.\outputs,\ o.\val \geq 0 - \end{displaymath} - -\item - \label{rule:all-inputs-refer-to-unspent-outputs-2} - \textbf{All inputs refer to unspent outputs} - \begin{displaymath} - \{i.\outputref: i \in t.\inputs \} \subseteq \unspent(l). - \end{displaymath} - -\item - \label{rule:forging-2} - \textbf{Forging} - - %% Don't delete this blank line ^ - A transaction with a non-zero \forge{} field is only - valid if either: - \begin{enumerate} - \item the ledger $l$ is empty (that is, if it is the initial transaction). - \item \label{rule:custom-forge} - for every key $h \in \support(t.\forge)$, there - exists $s \in t.\forgeScripts$ with $\scriptAddr(s) = h$. - \end{enumerate} - -\item - \label{rule:value-is-preserved-2} - \textbf{Values are preserved} - \begin{displaymath} - t.\forge + \sum_{i \in t.\inputs} \getSpent(i, l) = t.\fee + \sum_{o \in t.\outputs} o.\val - \end{displaymath} - -\item - \label{rule:no-double-spending-2} - \textbf{No output is double spent} - \begin{displaymath} - \textrm{If } i_1, i_2 \in t.\inputs \textrm{ and } i_1.\outputref = i_2.\outputref - \textrm{ then } i_1 = i_2. 
- \end{displaymath} - -\item - \label{rule:all-inputs-validate-2} - \textbf{All inputs validate} - \begin{displaymath} - \textrm{For all } i \in t.\inputs,\ \applyScript{i.\validator}(i.\datum,\, i.\redeemer, - \, \toData_2(\mkContext_2(t, i, l))) = \true - \end{displaymath} - -\item - \label{rule:validator-scripts-hash-2} - \textbf{Validator scripts match output addresses} - \begin{displaymath} - \textrm{For all } i \in t.\inputs,\ \scriptAddr(i.\validator) = \getSpent(i, l).\addr - \end{displaymath} - -\item - \label{rule:datum-objects-hash-2} - \textbf{Datum objects match output hashes} - \begin{displaymath} - \textrm{For all } i \in t.\inputs,\ \hashData(i.\datum) = \getSpent(i, l).\datumHash - \end{displaymath} - -\item - \label{rule:all-mpss-run} - \textbf{All monetary policy scripts evaluate to true} - \begin{displaymath} - \textrm{For all } s \in t.\forgeScripts,\ \applyMPScript{s}(\toTxData(t)) = \true - \end{displaymath} - -\end{enumerate} -\caption{Validity of a transaction $t$ in the EUTXO-2 model} -\label{fig:eutxo-2-validity} -\end{ruledfigure} - -\subsection{Remarks} -\paragraph{Monetary policies.} -Rules~\ref{rule:custom-forge} and~\ref{rule:all-mpss-run} can be used -to enforce monetary policies for custom currencies: see -Note~\ref{note:monetary-policies} for a detailed explanation. - -\paragraph{Preservation of value over \qtymap{}.} -In \cref{rule:value-is-preserved-2}, -$+$ and $\sum$ operate over \qtymap{}, which is -a finitely-supported function (which, with their operations, -are defined in \cref{sec:fsfs}). Preservation of value -in this model essentially requires that the -quantities of each of the individual currencies involved in the -transaction are preserved. - -\paragraph{Preservation of value and forging.} -Recall that values in $\forge$ can be negative whereas values in -outputs must be non-negative. This allows currency to be destroyed as -well as created. 
Rule~\ref{rule:value-is-preserved-2} implies that a
-transaction is invalid if it attempts to destroy more of a currency
-than is actually available in its inputs.
-
-\paragraph{Validation on Cardano.}
-Cardano adds an additional rule in Fig~\ref{fig:cardano-fee-validity}, which
-asserts that fees are paid exclusively in the native currency.
-
-\begin{ruledfigure}{H}
-  \textbf{Fees are paid in the native currency}
-  \begin{displaymath}
-    \support(t.\fee) = \{ \nativeCur \} \textrm{ and }
-    \support(t.\fee(\nativeCur)) = \{ \nativeTok \}
-  \end{displaymath}
-  \caption{Additional Cardano-specific validity rule for a transaction $t$ in the EUTXO-2 model}
-  \label{fig:cardano-fee-validity}
-\end{ruledfigure}
-
-\subsection{The EUTXO-2 model in practice.}
-See~\cite{Plutus-book} for examples of contracts which make use of the
-features of the EUTXO-2 model. See also
-\cref{note:monetary-policies,note:eutxo-2-implications,note:eutxo-2-performance}
-for comments on some technical aspects of the model.
-
-\appendix
-\section{Comments}
-\label{appendix:comments}
-
-\note{Computing with finitely-supported functions.}
-\label{note:finitely-supported-functions}
-We intend that finitely-supported functions are implemented as finite
-maps, with a failed map lookup corresponding to returning 0.
-
-However, there are two apparent difficulties:
-\begin{enumerate}
-  \item The domain of a map does not correspond to the support of the function:
-  values may be mapped to zero, thus appearing in the domain but not the support.
-  \item Pointwise equality is hard to compute.
-\end{enumerate}
-
-However, both of these are easily ameliorated. We say that a set $w$ is a \textit{weak support}
-of a finitely-supported function $f$ if $\support(f) \subseteq w$. That is, a
-weak support contains all the points that are non-zero, but possibly also some
-points that are zero. It is easy to see that the domain of a map is a weak
-support for the finitely-supported function it represents. 
- -We can compute the support from the weak support by simply checking the function -at each value and removing those that are zero. This is potentially expensive, -but we only need to do it when we need the support, which we only do during the -computation of \cref{rule:value-is-preserved-2}. - -Pointwise equality between two finitely-supported functions $f$ and $g$ is -equivalent to checking pointwise equality only over the union of $\support(f)$ -and $\support(g)$; or similarly over the union of a weak support of $f$ and of -$g$. In particular, for finitely-supported functions represented as maps, we can -check pointwise equality over the union of their domains. - -The same applies to checking partial ordering pointwise, which can similarly -be done over the union of the weak support. - -\medskip - -Mathematically, most of our finiteness restrictions are not strictly -required. For example, in the EUTXO-2 model -(Section~\ref{sec:eutxo-2}) we could allow transactions with -infinitely many monetary policy scripts and outputs with -non-finitely-supported quantities of token currencies; as long as the -number of inputs and outputs of each transaction is finite the model -remains mathematically sound. However, the finite model presented in -this document is more realistic from the point of view of real-world -implementation. - -\note{The Basic UTXO model: Outputs and scripts.} -\label{note:basic-utxo} -There is no well-defined notion of ownership for UTXOs. In many -transactions an output will accrue to a single user who is then -entitled to spend it at a later date. However, in general the notion -of ownership is more complex: an output of a transaction might require -the cooperation of several users before it could be spent, or it might -not be spendable until some other condition has been met (for example -a certain period of time may have to pass). At the extremes, an -output could be spendable by anyone, or by no-one. 
- -In order to deal with this complexity, an output can be locked by a -\textit{script}% -\footnote{In the Cardano setting, scripts are Plutus Core - programs~\citep{Plutus-Core-spec}.} -which must be supplied with suitable evidence to unlock the output. -In the basic model, each input to a transaction comes with a -\i{validator} script which checks that the transaction is allowed to -spend the output. In order to spend an output, the transaction -supplies an object of type $\Data$, called the \i{redeemer}, which provides -evidence that the transaction has the authority to do so;\footnote{The - validator plays a role similar to that of BitCoin's - \texttt{scriptPubKey} and the redeemer to \texttt{scriptSig}. -} a process called \i{validation} is then performed which checks that -the redeemer satisfies the conditions required by the validator. -Before a transaction can proceed, all inputs must be successfully -validated: if one or more inputs fails to validate then the -transaction is rejected. - -A simple example of this is a \i{pay-to-pubkey} script, where the -redeemer consists of a signature for the current transaction produced -using a private key belonging to the owner of the output. The -validator script (provided by the owner of the output) would check the -signature using a known public key: if the public key corresponds to -the private key then validation succeeds, otherwise it fails. Thus -the output can only be spent by the owner of the relevant private key - -See \cref{note:scripts} for more information about validators in -the EUTXO setting. - -\note{Inputs and outputs.} -\label{note:inputs-and-outputs} -A transaction has a \textsf{Set} of inputs but a \textsf{List} of outputs. -This is for two reasons: -\begin{itemize} - \item We need a way to uniquely identify a transaction output, so - that it can be referred to by a transaction input that spends it. 
The pair of - a transaction id and an output index is sufficient for this, but other schemes - are conceivable. - \item Equality of transaction outputs is defined structurally. But that means - that if we had two outputs paying $X$ to address $A$, then they would be - equal and therefore if we kept them in a \s{Set} one would be lost. -\end{itemize} - -\noindent An alternative design would be to include a unique nonce in transaction outputs -(effectively: their index in the list), and then we could use this to identify -them (and distinguish them from each other), and so we could keep them in a \s{Set} instead. - -\note{The datum.} -\label{note:datum} -The introduction of the datum -increases the expressivity of the model considerably. For example, -one can use a datum to propagate state between -transactions, and this can be used to give a contract the structure -of a finite state machine; the fact that the datum is part -of the output and not the transaction means that the state can -change without the transaction changing, which makes it easier to -have an ``identity'' for an ongoing contract. - -\note{Fees and Costs.} -\label{note:fees} -Users may have to pay a fee in order to have a transaction executed. -In a public blockchain an important reason for this is to deter -hostile agents from carrying out denial of service attacks by -submitting transactions which take a long time or use excessive -amounts of memory. The precise details of fees in Cardano are outwith -the scope of this document, and indeed have not been fully decided at -the time of writing. However, we expect that the fee will include a -component based on the size of the transaction (including its -associated scripts), and also a so-called \textit{gas} charge to cover -execution costs. 
We will have a model specifying the costs of -individual operations during script execution; costs will be monitored -dynamically during execution, and if the gas consumed ever exceeds the -amount covered by the fee then the transaction will fail. - -\note{Scripts and Hashes.} -\label{note:scripts} The spendability of an output is determined by -its validator, and thus the validator for an output must be -known at the time when the output is created (a completely new validator -may be created, or an existing validator may be re-used). - -Conceptually the validator is part of the output, so it may be rather -unexpected that \cref{fig:eutxo-1-types} defines the validator -to be part of an \textit{input}, with the output only containing the -address of the validator. The rationale for this is that a validator -$V$ for an output $O$ is not required until $O$ is actually spent, -which may be some time after $O$ was created. Recall from -\cref{note:fees} that the cost of executing a transaction depends -on the size of transaction, including the associated scripts. Thus -the transaction that produces the validator only pays for the size of -a hash (32 bytes) and the transaction that runs it pays for the full -size of the script. - -This strategy also helps to reduce on-chain -storage requirements, since validators can be stored off-chain until -needed (and the presence of the hash in the output can be used to -check that the correct validator is in fact being used when validation -occurs), but unspent outputs persist on-chain in the UTXO set until -they are eventually spent. - -The same strategy applies to datum objects. - -\note{Datum witnesses.} -\label{note:datum-witnesses} -Although a datum is only recorded as a hash in a transaction output, -it is useful to be able to record the full value of the datum on the -transaction that \emph{creates} an output: this allows observers to -determine the full datum without it having to be kept in the UTXO set. 
- -This mechanism is \emph{optional}, since it incurs an increase in transaction -size (and hence cost), and some clients may want to transmit the information -off-chain instead to minimise these costs. - -Hence there is a $\datumWits$ field on transactions, which \emph{may} -contain mappings from the $\DataHash$es used in the transaction to -their \Data{} values. This information is also present in \ctx{}. - -\note{Datum objects in \ctx{}.} -\label{note:datum-objects-in-ptx} -In \cref{fig:ptx-1-types,fig:ptx-2-types} the -\textsf{OutputInfo} does not include the datum attached to the output. -These may be found in $\datumWits$. - -Having access to the value of the datum allows a validator to inspect -an outgoing datum, for example to confirm that its contents are -correct in some sense. This can be useful when a datum is used to -propagate information about the state of a contract to later -transactions. See~\cite{Plutus-book} for examples of this. - -\note{Determinism of the validation process.} -\label{note:validation-determinism} The \ctx{} type is the only -information about the ``outside world'' available to a validator -at the time of validation. Allowing the validator access to -this information gives the EUTXO models a considerable amount of -power, as can be seen from the example contracts -in~\cite{Plutus-book}. However, it is important not to make too much -information available to the validator. The choice of the \ctx{} type -above means that the information available to the validator is -essentially independent of the state of the blockchain, and in -particular, it is independent of time (note that the check that the -current slot number is within a transaction's validity range takes -place \textit{before} validation is initiated, and the slot number is -not passed to the validator (although the validity range is)). 
This
-implies that validation is \textit{deterministic} and validators can
-be run off-chain in order to determine their execution cost before
-on-chain validation actually occurs. This helps users to calculate
-transaction fees in advance and avoid the possibility of their
-transactions failing due to an insufficient fee having been paid (and
-also avoids overpayment due to overestimating the fees).
-
-\todokwxm{This may be a little optimistic. For example, in the
-  crowdfunding contract we don't know in advance how many
-  contributions will be made, or whether a campaign will succeed or
-  fail. Thus we won't know how much the final transaction will cost
-  until just before it happens.}
-
-\note{Transaction lookup during validation.}
-\label{note:tx-lookup-never-fails}
-Note that the $\getSpent$ function of
-Figure~\ref{fig:validation-functions-1} uses $\lookupTx$, which could
-fail if the relevant transaction does not exist. However, we only use
-$\getSpent$ during transaction validation, and in that case
-Rule~\ref{rule:all-inputs-refer-to-unspent-outputs} of
-Figure~\ref{fig:eutxo-1-validity} and
-Rule~\ref{rule:all-inputs-refer-to-unspent-outputs-2} of
-Figure~\ref{fig:eutxo-2-validity} ensure that all of the transaction
-inputs refer to existing unspent outputs, and in these circumstances
-$\lookupTx$ will always succeed for the transactions of interest.
-
-
-\note{Monetary policies for custom currencies.}
-\label{note:monetary-policies}
-The new \textbf{Forging} rule in \cref{fig:eutxo-2-validity} enables
-custom currencies to implement their own monetary policies: for
-example, one might wish to place some limit on the amount of a
-currency that can be forged, or restrict the creation of the currency
-to owners of particular public keys.
-
-The idea is that a custom currency has a monetary policy which is
-defined by some script $H$, and the address $h = \scriptAddr(H)$ is
-used as the identifier of the currency. 
- -Whenever a new quantity of the currency is forged, -Rules~\ref{rule:custom-forge} and~\ref{rule:all-mpss-run} of -Figure~\ref{fig:eutxo-2-validity} imply that $H$ must be contained in -the $\forgeScripts$ field of the transaction, and that it must be -successfully executed; $H$ is provided with the details of the -transaction via the $\Data$ object produced by $\toTxData$, so it has -access to the \forge{} field of the transaction and knows how much of -the currency is to be forged and can respond -appropriately.\footnote{We do not insist that every monetary policy - script in a transaction is associated with a currency which the - transaction is actually forging, so a transaction may include - apparently unnecessary monetary policy scripts. The creator of the - transaction is responsible for providing the contents of the - $\forgeScripts$ field and is free to include or exclude such scripts - as they see fit.} - -The advantage of this scheme is that custom currencies can be handled -entirely within the smart contract system, without the need to -introduce any extra blockchain infrastructure such as a central -registry of custom currencies. - -In practice some refinement of this scheme will be required in order -to (a) allow re-use of a monetary policy for different currencies, and -(b) prevent unauthorised forging of a currency. To deal with (a) we -can make the monetary policy script unique by including a nonce. This -still doesn't prevent unauthorised people from using the script $H$ to -produce currency, but this can be prevented by, for instance, -embedding a reference to an unspent output in the script and requiring -that the currency can only be forged if the referenced output is spent -at the same time, so it can only be forged once. 
- -\note{Implications of the EUTXO-2 model.} -\label{note:eutxo-2-implications} -The EUTXO-2 model and the techniques described in -\cref{note:monetary-policies} allow us to implement fungible -(normal) and non-fungible token currencies, as well as ``mixed -states'': -\begin{itemize} -\item Standard (fungible) currencies are implemented by issuing - currencies with a single \token{}. -\item Non-fungible token currencies are implemented by only ever - issuing single quantities of many unique \token{}s. -\item Note that there is nothing in this model which enforces - uniqueness: having multiples of a single \token{} merely means that - those can be used fungibly. If a currency wants to make sure it only - issues unique tokens it must track this itself. These ``mixed'' token - currencies can have many \token{}s, but these can have more than unit - quantities in circulation. These can be useful to model distinct - categories of thing where there are fungible quantities within - those, for example share classes. -\end{itemize} - -\note{Performance issues for EUTXO-2.} -\label{note:eutxo-2-performance} -The EUTXO-2 model will lose some efficiency in comparison to the EUTXO-1 -model, simply because the data structures are more complicated. This -would even apply to transactions which only involve the native -currency (if there is one), since it would be necessary to check whether the \qtymap{} -contains anything that needs to be processed. If this is a concern -then one could implement a model with two types of transaction, -essentially just the disjoint union of the EUTXO-1 and EUTXO-2 -transaction types. A simple case distinction at the start of a -transaction could then select either a fast native-currency-only -computation or a slower multicurrency computation. This would be -harder to maintain though. 
- -\smallskip Another optimisation would be possible if one wished to -implement custom currencies but not NFTs: since in this case every -currency would only have a single token, the tokens could be omitted -and the \qtymap{} replaced with a map from currency ids to quantities. - -\smallskip A more significant cost may be that we can no longer use -\verb|{-# UNPACK #-}| when our \qty{} type stops being a simple -combination of wrappers and products around primitives, but this is -again an issue with any multi-currency proposal. - -\bibliographystyle{plainnat} %% ... or whatever -\bibliography{extended-utxo-specification} - - -\end{document} diff --git a/extended-utxo-spec/figures/Makefile b/extended-utxo-spec/figures/Makefile deleted file mode 100644 index 0ff03b748d..0000000000 --- a/extended-utxo-spec/figures/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# Empty makefile, just in case we ever need to do anything fancy in here. - -all: ; - -clean: ; diff --git a/fake-pab/LICENSE b/fake-pab/LICENSE deleted file mode 100644 index 0c8a80022e..0000000000 --- a/fake-pab/LICENSE +++ /dev/null @@ -1,53 +0,0 @@ -Apache License - -Version 2.0, January 2004 - -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
- -"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. - -"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of this License; and -You must cause any modified files to carry prominent notices stating that You changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. 
- -You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. -5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS diff --git a/fake-pab/NOTICE b/fake-pab/NOTICE deleted file mode 100644 index 536c4061cb..0000000000 --- a/fake-pab/NOTICE +++ /dev/null @@ -1,14 +0,0 @@ -Copyright 2019 Input Output (Hong Kong) Ltd. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - diff --git a/fake-pab/README.md b/fake-pab/README.md deleted file mode 100644 index 5f3def9418..0000000000 --- a/fake-pab/README.md +++ /dev/null @@ -1 +0,0 @@ -# Fake PAB diff --git a/fake-pab/app/Main.hs b/fake-pab/app/Main.hs deleted file mode 100644 index 4b24285bba..0000000000 --- a/fake-pab/app/Main.hs +++ /dev/null @@ -1,106 +0,0 @@ -{-# LANGUAGE ApplicativeDo #-} -{-# LANGUAGE BlockArguments #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE ScopedTypeVariables #-} - -module Main - ( main, - ) -where - -import Control.Immortal (create, stop) -import Control.Monad.IO.Class (MonadIO, liftIO) -import Control.Monad.Logger (MonadLogger, logInfoN, runStderrLoggingT) -import qualified Data.Text as Text -import Network.Wai.Handler.Warp (HostPreference, defaultSettings, setHost, setPort) -import Options.Applicative (CommandFields, Mod, Parser, argument, auto, command, customExecParser, - disambiguate, fullDesc, help, helper, idm, info, long, metavar, option, - prefs, progDesc, short, showDefault, showHelpOnEmpty, showHelpOnError, str, - strOption, subparser, value) -import qualified PSGenerator -import qualified Webserver - --- | You might wonder why we don't stick everything in `Config`. The --- answer is that pushing certain flags to the command line makes --- automated deployment easier. --- --- You might also wonder why we don't stick everything on the command --- line. The answer is for flags that rarely change, putting them in a --- config file makes development easier. 
-data Command - = Webserver - { _host :: !HostPreference, - _port :: !Int, - _static :: !FilePath, - _connection_string :: !String - } - | PSGenerator {_outputDir :: !FilePath} - deriving (Show, Eq) - -commandParser :: Parser Command -commandParser = subparser $ webserverCommandParser <> psGeneratorCommandParser - -psGeneratorCommandParser :: Mod CommandFields Command -psGeneratorCommandParser = - command "psgenerator" $ - flip info (fullDesc <> progDesc "Generate the frontend's PureScript files.") $ do - _outputDir <- - argument - str - ( metavar "OUTPUT_DIR" - <> help "Output directory to write PureScript files to." - ) - pure PSGenerator {..} - -webserverCommandParser :: Mod CommandFields Command -webserverCommandParser = - command "webserver" $ - flip info fullDesc $ do - _host <- - strOption - ( short 'b' <> long "bind" <> help "Webserver bind address" - <> showDefault - <> value "127.0.0.1" - ) - _port <- - option - auto - ( short 'p' <> long "port" <> help "Webserver port number" - <> showDefault - <> value 8080 - ) - _static <- - strOption - ( short 's' <> long "static-path" <> help "Location of static files to serve" - <> showDefault - <> value "." - ) - _connection_string <- - strOption - ( short 'c' <> long "connection-string" <> help "Connection string for PosgreSQL database" - <> showDefault - <> value "" - ) - pure Webserver {..} - -runCommand :: (MonadIO m, MonadLogger m) => Command -> m () -runCommand Webserver {..} = liftIO - do minerThread <- create $ const $ Webserver.miner _connection_string - Webserver.run _connection_string _static settings - stop minerThread - where - settings = setHost _host . 
setPort _port $ defaultSettings -runCommand PSGenerator {..} = liftIO $ PSGenerator.generate _outputDir - -main :: IO () -main = do - options <- - customExecParser - (prefs $ disambiguate <> showHelpOnEmpty <> showHelpOnError) - (info (helper <*> commandParser) idm) - runStderrLoggingT $ do - logInfoN $ "Running: " <> Text.pack (show (hideConnectionString options)) - runCommand options - where hideConnectionString (Webserver h p s _) = Webserver h p s "