From 742a71c507362737126e4785e8b362b1450b1f6b Mon Sep 17 00:00:00 2001 From: Tuomo Tanskanen Date: Wed, 24 Jan 2024 13:19:52 +0200 Subject: [PATCH] add markdownlint script and fix the complaints - Add markdownlint script and fix the complaints. This replaces pre-commit/prettier. - Add shellcheck script to replace pre-commit/shfmt. - Fix markdownlint issues. Signed-off-by: Tuomo Tanskanen --- .markdownlint-cli2.yaml | 9 + .pre-commit-config.yaml | 17 +- GUIDELINES.md | 300 ++++++--- README.md | 27 +- _faqs/.markdownlint-cli2.yaml | 4 + _faqs/baremetal-operator.md | 3 +- _faqs/boot-processes.md | 4 +- _faqs/capm3.md | 5 +- _faqs/cluster-api.md | 3 +- _faqs/cpu-architectures.md | 3 +- _faqs/ipmi.md | 7 +- _faqs/kinds-of-operating-systems.md | 4 +- _faqs/metal3-support-provisioners.md | 4 +- _faqs/network-configuration.md | 3 +- _faqs/openstack.md | 4 +- _faqs/operating-system-installer.md | 5 +- _faqs/out-of-band.md | 9 +- _faqs/using-metal3-independently.md | 4 +- _faqs/what-is-Ironic.md | 5 +- _faqs/what-is-an-operator.md | 8 +- _faqs/what-is-cleaning.md | 4 +- _faqs/what-is-inspection.md | 6 +- _faqs/what-is-ipxe.md | 4 +- _faqs/what-is-virtual-media.md | 3 +- _faqs/why-use-Ironic.md | 4 +- _includes/.markdownlint-cli2.yaml | 4 + _includes/docs.md | 13 +- _includes/privacy-statement.md | 22 +- _posts/.markdownlint-cli2.yaml | 5 + ...make_Kubernetes_on_bare_machines_simple.md | 52 +- ...c_for_Declarative_Bare_Metal_Kubernetes.md | 2 +- _posts/2019-06-25-Metal3.md | 2 +- ...ucture_All-Abstractions-Start-Somewhere.md | 4 + ...netes_native_bare_metal_host_management.md | 4 + ...-metal3_deploy_kubernetes_on_bare_metal.md | 4 + ...-02-18-metal3-dev-env-install-deep-dive.md | 636 +++++++++++++----- ...20-02-27-talk-kubernetes-finland-metal3.md | 8 +- ...l3-dev-env-BareMetal-Cluster-Deployment.md | 292 +++++--- _posts/2020-07-05-raw-image-streaming.md | 3 +- _posts/2021-05-05-Pivoting.md | 24 +- ...22-07-08-One_cluster_multiple_providers.md | 215 +++--- hack/markdownlint.sh | 20 + hack/shellcheck.sh | 19 + 43 files changed, 1258 insertions(+), 520 deletions(-) create mode 100644 .markdownlint-cli2.yaml create mode 100644 _faqs/.markdownlint-cli2.yaml create mode 100644 _includes/.markdownlint-cli2.yaml create mode 100644 _posts/.markdownlint-cli2.yaml create mode 100755 hack/markdownlint.sh create mode 100755 hack/shellcheck.sh diff --git a/.markdownlint-cli2.yaml b/.markdownlint-cli2.yaml new file mode 100644 index 000000000..7811fc663 --- /dev/null +++ b/.markdownlint-cli2.yaml @@ -0,0 +1,9 @@ +# Reference: https://github.com/DavidAnson/markdownlint-cli2#markdownlint-cli2yaml + +config: + ul-indent: + # Kramdown wanted us to have 3 earlier, tho this CLI recommends 2 or 4 + indent: 3 + +# Don't autofix anything, we're linting here +fix: false diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5e810a71d..92af78853 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,7 +7,7 @@ repos: rev: v2.2.1 hooks: - id: prettier - files: \.(css|js|md|markdown|json) + files: \.(css|js|json) - repo: https://github.com/python/black rev: 20.8b1 hooks: @@ -41,21 +41,6 @@ repos: rev: 3.9.0 hooks: - id: flake8 -- repo: local - hooks: - - id: shfmt - name: shfmt - minimum_pre_commit_version: 2.4.0 - language: golang - additional_dependencies: - - mvdan.cc/sh/v3/cmd/shfmt@v3.1.1 - entry: shfmt - args: - - -w - - -i - - '0' - types: - - shell - repo: https://github.com/asottile/blacken-docs rev: v1.12.1 hooks: diff --git a/GUIDELINES.md b/GUIDELINES.md index 827ac9e73..08dd6bb4a 
100644 --- a/GUIDELINES.md +++ b/GUIDELINES.md @@ -1,61 +1,102 @@ # Contents Guidelines -This document describes a set of guidelines for generating content for [metal3.io](https://metal3.io), exceptions can be made if and when it makes sense, but please try to follow this guide as much as possible. +This document describes a set of guidelines for generating content for +[metal3.io](https://metal3.io), exceptions can be made if and when it +makes sense, but please try to follow this guide as much as possible. ## General contents guidelines -Please use the following as general guidelines on any kind of content generated for this site: +Please use the following as general guidelines on any kind of content +generated for this site: ### Technical setup -- Install `pre-commit` in your system and from the repository folder run `pre-commit install` so that the git hook is in place. - - It will avoid commits to the `source` and `master` branch - - It will spell-check articles before the commit can be performed (TODO: re-add some spellcheck tool) - - Adjust some formatting in markdown like tables, spaces before and after headings, etc (via prettifier) - - If you're using `npm` you can also add pre-commit as a dependency for development so that it incorporates the `pre-commit` hook and it also spellchecks before you submit to CI and risk getting a failure in build. To do so, use: `npm install --save-dev pre-commit` +- Install `pre-commit` in your system and from the repository folder run + `pre-commit install` so that the git hook is in place. + - It will avoid commits to the `source` and `master` branch + - It will spell-check articles before the commit can be performed + (TODO: re-add some spellcheck tool) + - Adjust some formatting in markdown like tables, spaces before and + after headings, etc (via prettifier) + - If you're using `npm` you can also add pre-commit as a dependency + for development so that it incorporates the `pre-commit` hook and it + also spellchecks before you submit to CI and risk getting a failure + in build. To do so, use: `npm install --save-dev pre-commit` ### Content -- Follow [Kramdown Quick Reference](https://kramdown.gettalong.org/quickref.html) for syntax reference -- Split the contents into sections using the different levels of headers that Markdown offers - - Keep in mind that once rendered, the title you set in the Front Matter data will use _H1_, so start your sections from _H2_ -- Closing section, the same way we can add a brief opening section describing what the contents are about, it's very important to add a closing section with thoughts, upcoming work on the topic discussed, encourage readers to test something and share their findings/thoughts, joining the community, ... keep in mind that this will probably be the last thing the reader will read -- [Code blocks](https://kramdown.gettalong.org/syntax.html#code-blocks), use them for: - - code snippets - - file contents - - console commands - - ... 
- - Use the proper tag to let the renderer know what type of contents your including in the block for syntax highlighting -- Consistency is important, makes it easier for the reader to follow along, for instance: - - If you're writing about something running on OCP, use `oc` consistently, don't mix it up with `kubectl` - - If you add your shell prompt to your console blocks, add it always or don't, but don't do half/half -- Use backticks (`) when mentioning commands on your text, like we do in this document -- Use _emphasis/italics_ for non-English words such as technologies, projects, programming language keywords... -- Use bullet points, these are a great way to clearly express ideas through a series of short and concise messages - - Express clear benefit. Think of bullets as mini-headlines - - Keep your bullets symmetrical. 1-2 lines each - - Avoid bullet clutter. Don’t write paragraphs in bullets - - Remember bullets are not sentences. They’re just like headlines +- Follow [Kramdown Quick + Reference](https://kramdown.gettalong.org/quickref.html) for syntax + reference +- Split the contents into sections using the different levels of headers + that Markdown offers + - Keep in mind that once rendered, the title you set in the Front + Matter data will use _H1_, so start your sections from _H2_ +- Closing section, the same way we can add a brief opening section + describing what the contents are about, it's very important to add a + closing section with thoughts, upcoming work on the topic discussed, + encourage readers to test something and share their findings/thoughts, + joining the community, ... keep in mind that this will probably be the + last thing the reader will read +- [Code blocks](https://kramdown.gettalong.org/syntax.html#code-blocks), + use them for: + - code snippets + - file contents + - console commands + - ... + - Use the proper tag to let the renderer know what type of contents + your including in the block for syntax highlighting +- Consistency is important, makes it easier for the reader to follow + along, for instance: + - If you're writing about something running on OCP, use `oc` + consistently, don't mix it up with `kubectl` + - If you add your shell prompt to your console blocks, add it always + or don't, but don't do half/half +- Use backticks (`) when mentioning commands on your text, like we do in + this document +- Use _emphasis/italics_ for non-English words such as technologies, + projects, programming language keywords... +- Use bullet points, these are a great way to clearly express ideas + through a series of short and concise messages + - Express clear benefit. Think of bullets as mini-headlines + - Keep your bullets symmetrical. 1-2 lines each + - Avoid bullet clutter. Don’t write paragraphs in bullets + - Remember bullets are not sentences. They’re just like headlines - Use of images - - Images are another great way to express information, for instance, instead of trying to describe your way around a UI, just add a snippet of the UI, readers will understand it easier and quicker - - Avoid large images, if you have to try to resize them, otherwise the image will be wider than the writing when your contents is rendered - - Linking or HTTP references - - Linking externally can be problematic, some time after the publication of your contents, try linking to the repositories or directories, website's front page rather than to a page, etc. 
- - For linking internally use [Jekyll's tags](https://jekyllrb.com/docs/liquid/tags/#links) + - Images are another great way to express information, for instance, + instead of trying to describe your way around a UI, just add a + snippet of the UI, readers will understand it easier and quicker + - Avoid large images, if you have to try to resize them, otherwise the + image will be wider than the writing when your contents is rendered + - Linking or HTTP references + - Linking externally can be problematic, some time after the + publication of your contents, try linking to the repositories or + directories, website's front page rather than to a page, etc. + - For linking internally use [Jekyll's + tags](https://jekyllrb.com/docs/liquid/tags/#links) - For blog posts - - Use macro `{% post_url FILENAME.WITHOUT.EXTENSION %}` instead of regular URI + - Use macro `{% post_url FILENAME.WITHOUT.EXTENSION %}` instead + of regular URI - For pages, collections, assets, etc - - For document linking: `{% link _collection/name-of-document.md %}` instead of regular URI - - For file linking: `{% link /assets/files/doc.pdf %}` instead of regular URI + - For document linking: + `{% link _collection/name-of-document.md %}` instead of regular URI + - For file linking: `{% link /assets/files/doc.pdf %}` instead + of regular URI ## Contents types ### Blog Posts -All Blog posts are located in the [blog/\_posts](blog/_posts/) directory, and all FAQ posts in the [faqs/\_posts](faqs_posts), on them, each entry is a Markdown file with extension _.md_ or _.markdown_. For creating a blog post for [metal3.io ](https://metal3.io), you need to complete the following steps. +All Blog posts are located in the [blog/\_posts](blog/_posts/) +directory, and all FAQ posts in the [faqs/\_posts](faqs_posts), on them, +each entry is a Markdown file with extension _.md_ or _.markdown_. For +creating a blog post for [metal3.io](https://metal3.io), you need to +complete the following steps. -- Create a markdown file with the _YYYY-MM-DD-TITLE.markdown_ naming convention -- For the [Front Matter](https://jekyllrb.com/docs/front-matter/), you need to add the following: +- Create a markdown file with the _YYYY-MM-DD-TITLE.markdown_ naming + convention +- For the [Front Matter](https://jekyllrb.com/docs/front-matter/), you + need to add the following: ```yaml --- @@ -72,61 +113,89 @@ All Blog posts are located in the [blog/\_posts](blog/_posts/) directory, and al ``` - - **layout**: Defines style settings for different types of contents. All blog posts use the _posts_ layout - - **author**: Sets the author's name, will publicly appear on the post. As a rule of thumb, use your GitHub username, Twitter handler or any other identifier known in the community - - **title**: The title for your blog post - - **description**: Short extract of the blog post - - **navbar_active**: Defines settings for the navigation bar, type _Blogs_ is the only choice available - - **pub-date**: Month and day, together with _pub-year_ form the date that will be shown in the blog post as the date it was published, must match the date on the file name - - **pub-year**: Blog post publication year, must match the year in the file name - - **category**: Array of categories for your blog post, some common ones are community, news and releases, as last resort, use uncategorized. 
If you'd like to add multiple categories, used _categories_ instead of _category_ and a [YAML list](https://en.wikipedia.org/wiki/YAML#Basic_components) - - **comments**: This enables comments your blog post. Please consider setting this to _true_ and allow discussion around the topic you're writing, otherwise skip the field or set it to false + - **layout**: Defines style settings for different types of contents. + All blog posts use the _posts_ layout + - **author**: Sets the author's name, will publicly appear on the + post. As a rule of thumb, use your GitHub username, Twitter handler + or any other identifier known in the community + - **title**: The title for your blog post + - **description**: Short extract of the blog post + - **navbar_active**: Defines settings for the navigation bar, type + _Blogs_ is the only choice available + - **pub-date**: Month and day, together with _pub-year_ form the date + that will be shown in the blog post as the date it was published, + must match the date on the file name + - **pub-year**: Blog post publication year, must match the year in the + file name + - **category**: Array of categories for your blog post, some common + ones are community, news and releases, as last resort, use + uncategorized. If you'd like to add multiple categories, used + _categories_ instead of _category_ and a [YAML + list](https://en.wikipedia.org/wiki/YAML#Basic_components) + - **comments**: This enables comments your blog post. Please consider + setting this to _true_ and allow discussion around the topic you're + writing, otherwise skip the field or set it to false - Blog post contents recommendation: - - Title is a very important piece of your blog post, a catchy title will likely have more readers, write a bad title and no matter how good the contents is, you'll likely get less readers - - After the title, write a brief introduction of what you're going to be writing about, which will help the reader to get a grasp on the topic - - Closing section, the same way we can add a brief introduction of what the blog post is about, it's very important to add a closing section with thoughts, upcoming work on the topic discussed, encourage readers to test something and share their findings, joining the community, ... + - Title is a very important piece of your blog post, a catchy title + will likely have more readers, write a bad title and no matter how + good the contents is, you'll likely get less readers + - After the title, write a brief introduction of what you're going to + be writing about, which will help the reader to get a grasp on the + topic + - Closing section, the same way we can add a brief introduction of + what the blog post is about, it's very important to add a closing + section with thoughts, upcoming work on the topic discussed, + encourage readers to test something and share their findings, + joining the community, ... + + - For big images, you can use photoswipe to show a miniature that is + zoomable, to do so, insert code like this: + + ```html + + ``` - * For big images, you can use photoswipe to show a miniature that is zoomable, to do so, insert code like this: + It's very important to define the original image size in `data-size` + to match the current image size and adjust the `` fields + `width` and `height` to the miniature you want to use. - ```html - - ``` - - It's very important to define the original image size in `data-size` to match the current image size and adjust the `` fields `width` and `height` to the miniature you want to use. 
- - If there's more than one image, that you want to be shown together, leave the `
<div>` and add another `<figure>
` entry within it. - - Do not change the name of the div class. You can use the `figcaption` inner text to show as title for the image. + If there's more than one image, that you want to be shown together, + leave the `
<div>` and add another `<figure>
` entry within it. + + Do not change the name of the div class. You can use the + `figcaption` inner text to show as title for the image. ### Pages -The _[Pages](https://jekyllrb.com/docs/pages/)_ are located at the [pages](/pages/) directory, to create one follow these steps: +The _[Pages](https://jekyllrb.com/docs/pages/)_ are located at the +[pages](/pages/) directory, to create one follow these steps: - Create the markdown file, _filename.md_, in [pages](/pages/) directory -- _Pages_ also use [Front Matter](https://jekyllrb.com/docs/front-matter/), here's an example: +- _Pages_ also use [Front + Matter](https://jekyllrb.com/docs/front-matter/), here's an example: ```yaml --- @@ -138,23 +207,35 @@ The _[Pages](https://jekyllrb.com/docs/pages/)_ are located at the [pages](/page ``` -- The fields have the same function as for blog posts, but some values are different, as we're producing different contents. +- The fields have the same function as for blog posts, but some values + are different, as we're producing different contents. - - **permalink** tells _Jekyll_ what the output path for your page will be, it's useful for linking and web indexers - - **navbar_active** will add your page to the navigation bar you specify as value, commonly used values are _Docs_ or _Videos_ - - **layout**, just use _default_ as value, it'll include all the necessary parts when your page is generated + - **permalink** tells _Jekyll_ what the output path for your page will + be, it's useful for linking and web indexers + - **navbar_active** will add your page to the navigation bar you + specify as value, commonly used values are _Docs_ or _Videos_ + - **layout**, just use _default_ as value, it'll include all the + necessary parts when your page is generated - As for the contents, follow the general guidelines above ### Labs -The Labs are usually a set of directed exercises with the objective of teaching something by practising it, e.g. metal3 101, which would introduce metal3 to new and potential users through a series of easy (101!) exercises. They are composed of a [Landing page](https://en.wikipedia.org/wiki/Landing_page) and the actual exercises. +The Labs are usually a set of directed exercises with the objective of +teaching something by practising it, e.g. metal3 101, which would +introduce metal3 to new and potential users through a series of easy +(101!) exercises. They are composed of a [Landing +page](https://en.wikipedia.org/wiki/Landing_page) and the actual +exercises. #### Lab landing page -[Landing pages](https://en.wikipedia.org/wiki/Landing_page) are the book cover for your lab, for creating it, please follow these steps: +[Landing pages](https://en.wikipedia.org/wiki/Landing_page) are the book +cover for your lab, for creating it, please follow these steps: -- Use the following Front Matter block includes data for the for your lab's [landing page](https://en.wikipedia.org/wiki/Landing_page), replacing the values by your own: +- Use the following Front Matter block includes data for the for your + lab's [landing page](https://en.wikipedia.org/wiki/Landing_page), + replacing the values by your own: ```yaml --- @@ -167,16 +248,20 @@ navbar_active: Labs ``` -- Modify **title** and **permalink**, and leave the rest as shown in the example +- Modify **title** and **permalink**, and leave the rest as shown in the + example - For the contents, some recommendations: - - Describe the lab objectives clearly. - - Clearly state the requirements if any, e.g. laptop, cloud account, ... 
- - Describe what anyone would learn when taking the lab. - - Add references to documentation, projects, ... + - Describe the lab objectives clearly. + - Clearly state the requirements if any, e.g. laptop, cloud account, + ... + - Describe what anyone would learn when taking the lab. + - Add references to documentation, projects, ... #### Lab pages -These are the pages containing actual lab, exercises, documentations, etc... and each of them has to include a similar Front Matter block to the one that follows: +These are the pages containing actual lab, exercises, documentations, +etc... and each of them has to include a similar Front Matter block to +the one that follows: ```yaml --- @@ -187,10 +272,19 @@ lab: metal3 101 Lab order: 1 ``` -This time we've got a new field, _lab_, which matches the lab _title_ from the Front Matter block on the landing page above, this is used to build the table of contents. Both _order_ and _layout_ should stay as they are in the example and just adjust the _title_ and _permalink_. - -Again use the concepts from the general guidelines section and apply the following suggestions when it makes sense: - -- When asking to execute a command that'll produce output, add the output on the lab so the user knows what to expect. -- When working through labs that work on documented features, link to the official documentation either through out the lab or in a _reference_ section in the landing page. -- Be mindful about using files from remote Git repositories or similar, especially if they're not under your control, they might be gone after a while. +This time we've got a new field, _lab_, which matches the lab _title_ +from the Front Matter block on the landing page above, this is used to +build the table of contents. Both _order_ and _layout_ should stay as +they are in the example and just adjust the _title_ and _permalink_. + +Again use the concepts from the general guidelines section and apply the +following suggestions when it makes sense: + +- When asking to execute a command that'll produce output, add the + output on the lab so the user knows what to expect. +- When working through labs that work on documented features, link to + the official documentation either through out the lab or in a + _reference_ section in the landing page. +- Be mindful about using files from remote Git repositories or similar, + especially if they're not under your control, they might be gone after + a while. diff --git a/README.md b/README.md index 79e4f9165..322c462ad 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,9 @@ ## Contributing content -We more than welcome contributions in the form of blog posts, pages and/or labs, reach out if you happen to have an idea or find an issue with our content! [Here's our guideline for content](GUIDELINES.md). +We more than welcome contributions in the form of blog posts, pages +and/or labs, reach out if you happen to have an idea or find an issue +with our content! [Here's our guideline for content](GUIDELINES.md). 
## Test your changes in a local container @@ -15,26 +17,37 @@ We more than welcome contributions in the form of blog posts, pages and/or labs, ```console cd metal3-io.github.io mkdir .jekyll-cache - podman run -d --name metal3io -p 4000:4000 -v $(pwd):/srv/jekyll:Z jekyll/jekyll jekyll serve --future --watch + podman run -d --name metal3io -p 4000:4000 \ + -v $(pwd):/srv/jekyll:Z jekyll/jekyll jekyll serve --future --watch ``` - **NOTE**: Make sure you are in the _metal3-io.github.io_ directory before running the above command as the Z at the end of the volume (-v) will relabel its contents so it can be written from within the container, like running `chcon -Rt svirt_sandbox_file_t -l s0:c1,c2` yourself. + **NOTE**: Make sure you are in the _metal3-io.github.io_ directory + before running the above command as the Z at the end of the volume + (-v) will relabel its contents so it can be written from within the + container, like running `chcon -Rt svirt_sandbox_file_t -l s0:c1,c2` + yourself. - On an OS without SELinux: ```console cd metal3-io.github.io mkdir .jekyll-cache - sudo docker run -d --name metal3io -p 4000:4000 -v $(pwd):/srv/jekyll jekyll/jekyll jekyll serve --future --watch + sudo docker run -d --name metal3io -p 4000:4000 \ + -v $(pwd):/srv/jekyll jekyll/jekyll jekyll serve --future --watch ``` ### View the site -Visit `http://0.0.0.0:4000` in your local browser. +Visit [http://0.0.0.0:4000](http://0.0.0.0:4000) in your local browser. The Metal3.io website is a Jekyll site, hosted with GitHub Pages. -All pages are located under `/pages`. Each section of the site is broken out into their respective folders - `/blogs` for the various Blog pages, `/docs` for the Documentation and `/videos` for the videos that are shared. +All pages are located under `/pages`. Each section of the site is broken +out into their respective folders - `/blogs` for the various Blog pages, +`/docs` for the Documentation and `/videos` for the videos that are +shared. All site images are located under `/assets/images`. Please do not edit these images. -Images that relate to blog entries are located under `/assets/images/BLOG_POST_TITLE`. The **BLOG_POST_TITLE** should match the name of the markdown file that you added under `/_posts`. +Images that relate to blog entries are located under +`/assets/images/BLOG_POST_TITLE`. The **BLOG_POST_TITLE** should match +the name of the markdown file that you added under `/_posts`. diff --git a/_faqs/.markdownlint-cli2.yaml b/_faqs/.markdownlint-cli2.yaml new file mode 100644 index 000000000..841c605a1 --- /dev/null +++ b/_faqs/.markdownlint-cli2.yaml @@ -0,0 +1,4 @@ +# Reference: https://github.com/DavidAnson/markdownlint-cli2#markdownlint-cli2yaml + +config: + first-line-h1: false diff --git a/_faqs/baremetal-operator.md b/_faqs/baremetal-operator.md index 07b793930..3b335077f 100644 --- a/_faqs/baremetal-operator.md +++ b/_faqs/baremetal-operator.md @@ -2,4 +2,5 @@ question: What is the baremetal operator? --- -Baremetal Operator is a Kubernetes controller providing support for several custom resources, most importantly - BareMetalHosts. +Baremetal Operator is a Kubernetes controller providing support for +several custom resources, most importantly - BareMetalHosts. diff --git a/_faqs/boot-processes.md b/_faqs/boot-processes.md index 981e83d7d..fe207619f 100644 --- a/_faqs/boot-processes.md +++ b/_faqs/boot-processes.md @@ -2,4 +2,6 @@ question: What kind of boot processes can be paired with specific BMC protocols? 
--- -Drivers with "virtual media" in their name can use the virtual media technology to boot an ISO remotely. The other drivers require network boot, more specifically - iPXE. +Drivers with "virtual media" in their name can use the virtual media +technology to boot an ISO remotely. The other drivers require network +boot, more specifically - iPXE. diff --git a/_faqs/capm3.md b/_faqs/capm3.md index 035996b29..6065fddfc 100644 --- a/_faqs/capm3.md +++ b/_faqs/capm3.md @@ -2,4 +2,7 @@ question: What is Cluster API provider Metal3 (CAPM3)? --- -CAPM3 is an [infrastructure provider](https://cluster-api.sigs.k8s.io/user/concepts#infrastructure-provider) for the Cluster API that uses Metal3 and Ironic to provision machines for your cluster. +CAPM3 is an +[infrastructure provider](https://cluster-api.sigs.k8s.io/user/concepts#infrastructure-provider) +for the Cluster API that uses Metal3 and Ironic to provision machines +for your cluster. diff --git a/_faqs/cluster-api.md b/_faqs/cluster-api.md index 085774799..857351ebb 100644 --- a/_faqs/cluster-api.md +++ b/_faqs/cluster-api.md @@ -2,4 +2,5 @@ question: How does Metal3 relate to Cluster API (CAPI)? --- -The Metal3 project includes the Cluster API Provider Metal3 (CAPM3) - an infrastructure provider for Cluster API. +The Metal3 project includes the Cluster API Provider Metal3 (CAPM3) - an +infrastructure provider for Cluster API. diff --git a/_faqs/cpu-architectures.md b/_faqs/cpu-architectures.md index e88bea4a4..9d2a50f93 100644 --- a/_faqs/cpu-architectures.md +++ b/_faqs/cpu-architectures.md @@ -2,4 +2,5 @@ question: What CPU architectures are supported? --- -Both x86_64 (Intel) and AARCH64 (Arm) are supported. Mixed architectures (e.g. some hosts x86_64, some - aarch64) are not supported yet. +Both x86_64 (Intel) and AARCH64 (Arm) are supported. Mixed architectures +(e.g. some hosts x86_64, some - aarch64) are not supported yet. diff --git a/_faqs/ipmi.md b/_faqs/ipmi.md index a5ec921a1..843309128 100644 --- a/_faqs/ipmi.md +++ b/_faqs/ipmi.md @@ -2,4 +2,9 @@ question: What is IPMI? --- -IPMI is the acronym for `Intelligent Platform Management Interface` which is used to monitor hardware health (fans, voltage, temperature, etc). The specification is available at and was created by a joint effort by several manufacturers. It allows us to also define the boot order and power status of the hardware. +IPMI is the acronym for `Intelligent Platform Management Interface` +which is used to monitor hardware health (fans, voltage, temperature, +etc). The specification is available at +[here](https://www.intel.com/content/www/us/en/products/docs/servers/ipmi/ipmi-home.html) +and was created by a joint effort by several manufacturers. It allows us +to also define the boot order and power status of the hardware. diff --git a/_faqs/kinds-of-operating-systems.md b/_faqs/kinds-of-operating-systems.md index 02333b8a4..54567652e 100644 --- a/_faqs/kinds-of-operating-systems.md +++ b/_faqs/kinds-of-operating-systems.md @@ -2,4 +2,6 @@ question: What kinds of operating systems can be installed? --- -You can use any operating system that is available in a cloud format (e.g. qcow2). If you need first boot configuration, the image has to contain cloud-init or a similar first-boot tool. +You can use any operating system that is available in a cloud format +(e.g. qcow2). If you need first boot configuration, the image has to +contain cloud-init or a similar first-boot tool. 
diff --git a/_faqs/metal3-support-provisioners.md b/_faqs/metal3-support-provisioners.md index e3eb49685..3fd7276ab 100644 --- a/_faqs/metal3-support-provisioners.md +++ b/_faqs/metal3-support-provisioners.md @@ -2,4 +2,6 @@ question: Does Metal3 support provisioners other than Ironic? --- -While it's technically possible to add more provisioners, only Ironic is supported now, and supporting other provisioners is not on the current roadmap. +While it's technically possible to add more provisioners, only Ironic is +supported now, and supporting other provisioners is not on the current +roadmap. diff --git a/_faqs/network-configuration.md b/_faqs/network-configuration.md index 46de1dbc5..a49e7e9cf 100644 --- a/_faqs/network-configuration.md +++ b/_faqs/network-configuration.md @@ -2,4 +2,5 @@ question: How can one supply network configuration during provisioning? --- -You can put it to the BareMetalHost's network Data field in the [OpenStack network data format](https://docs.openstack.org/nova/latest/_downloads/9119ca7ac90aa2990e762c08baea3a36/network_data.json). +You can put it to the BareMetalHost's network Data field in the +[OpenStack network data format](https://docs.openstack.org/nova/latest/_downloads/9119ca7ac90aa2990e762c08baea3a36/network_data.json). diff --git a/_faqs/openstack.md b/_faqs/openstack.md index 175d32c0e..414cda5df 100644 --- a/_faqs/openstack.md +++ b/_faqs/openstack.md @@ -2,4 +2,6 @@ question: Ironic is developed as part of OpenStack, does Metal3 require OpenStack? --- -Ironic can be used as a stand-alone service without any other OpenStack services. In fact, Baremetal Operator does not support any other OpenStack services. +Ironic can be used as a stand-alone service without any other OpenStack +services. In fact, Baremetal Operator does not support any other +OpenStack services. diff --git a/_faqs/operating-system-installer.md b/_faqs/operating-system-installer.md index a5fb0d223..6df5d4bf6 100644 --- a/_faqs/operating-system-installer.md +++ b/_faqs/operating-system-installer.md @@ -2,4 +2,7 @@ question: Can I use my own operating system installer with Metal3? --- -You can use the [live ISO workflow](https://book.metal3.io/bmo/live-iso) to attach a bootable ISO to the machine using virtual media. Note that Baremetal Operator will not track the installation process in this case and will consider the host active once the ISO is booted. +You can use the [live ISO workflow](https://book.metal3.io/bmo/live-iso) +to attach a bootable ISO to the machine using virtual media. Note that +Baremetal Operator will not track the installation process in this case +and will consider the host active once the ISO is booted. diff --git a/_faqs/out-of-band.md b/_faqs/out-of-band.md index 131fad463..a76963ef8 100644 --- a/_faqs/out-of-band.md +++ b/_faqs/out-of-band.md @@ -2,4 +2,11 @@ question: What is an out-of-band management controller? --- -Enterprise hardware usually has an integrated or optional controller that allows reaching the server even if it's powered down, either via dedicated or shared nic. This controller allows some checks on the server hardware and also perform some settings like changing power status, changing Boot Order, etc. The Baremetal Operator uses it to power on, reboot and provision the physical servers to be used for running workloads on top. Commercial names include `iDrac`, `iLO`, `iRMC`, etc and most of them should support `IPMI`. 
+Enterprise hardware usually has an integrated or optional controller +that allows reaching the server even if it's powered down, either via +dedicated or shared nic. This controller allows some checks on the +server hardware and also perform some settings like changing power +status, changing Boot Order, etc. The Baremetal Operator uses it to +power on, reboot and provision the physical servers to be used for +running workloads on top. Commercial names include `iDrac`, `iLO`, +`iRMC`, etc and most of them should support `IPMI`. diff --git a/_faqs/using-metal3-independently.md b/_faqs/using-metal3-independently.md index 6c8193875..5d52ee3c1 100644 --- a/_faqs/using-metal3-independently.md +++ b/_faqs/using-metal3-independently.md @@ -2,4 +2,6 @@ question: Do I need to use the Metal3 with Cluster API or can I use Metal3 independently? --- -It is completely optional to use Cluster API. You can use only the Baremetal Operator and skip CAPM3 completely if all you need is bare-metal provisioning via Kubernetes API. +It is completely optional to use Cluster API. You can use only the +Baremetal Operator and skip CAPM3 completely if all you need is +bare-metal provisioning via Kubernetes API. diff --git a/_faqs/what-is-Ironic.md b/_faqs/what-is-Ironic.md index fbc43347d..1576b7610 100644 --- a/_faqs/what-is-Ironic.md +++ b/_faqs/what-is-Ironic.md @@ -2,4 +2,7 @@ question: What is Ironic and how does Metal3 relate to it? --- -Ironic is a bare metal provisioner, it handles provisioning of physical machines. Metal3 exposes a part of the Ironic functionality as a Kubernetes native API via the Baremetal Operator. Ironic is not part of Metal3 but Metal3 relies on Ironic to provision the bare metal hosts. +Ironic is a bare metal provisioner, it handles provisioning of physical +machines. Metal3 exposes a part of the Ironic functionality as a +Kubernetes native API via the Baremetal Operator. Ironic is not part of +Metal3 but Metal3 relies on Ironic to provision the bare metal hosts. diff --git a/_faqs/what-is-an-operator.md b/_faqs/what-is-an-operator.md index a66608d7f..c269999f1 100644 --- a/_faqs/what-is-an-operator.md +++ b/_faqs/what-is-an-operator.md @@ -2,4 +2,10 @@ question: What is an operator? --- -An Operator is a method of packaging, deploying and managing a Kubernetes application. A Kubernetes application is an application that is both deployed on Kubernetes and managed using the Kubernetes APIs and kubectl tooling. You can think of Operators as the runtime that manages this type of application on Kubernetes. If you want to learn more about Operators you can check the Operator framework website [https://operatorframework.io/what/](https://operatorframework.io/what/) +An Operator is a method of packaging, deploying and managing a +Kubernetes application. A Kubernetes application is an application that +is both deployed on Kubernetes and managed using the Kubernetes APIs and +kubectl tooling. You can think of Operators as the runtime that manages +this type of application on Kubernetes. If you want to learn more about +Operators you can check the Operator framework website +[https://operatorframework.io/what/](https://operatorframework.io/what/) diff --git a/_faqs/what-is-cleaning.md b/_faqs/what-is-cleaning.md index 178170cde..d2c840b4f 100644 --- a/_faqs/what-is-cleaning.md +++ b/_faqs/what-is-cleaning.md @@ -2,4 +2,6 @@ question: What is cleaning? Can I disable it? --- -Cleaning removes partitioning information from the disks to avoid conflicts with the new operating system. 
See [automated cleaning](https://book.metal3.io/bmo/automated_cleaning) for details. +Cleaning removes partitioning information from the disks to avoid +conflicts with the new operating system. See +[automated cleaning](https://book.metal3.io/bmo/automated_cleaning) for details. diff --git a/_faqs/what-is-inspection.md b/_faqs/what-is-inspection.md index 8b04aef38..e5555893d 100644 --- a/_faqs/what-is-inspection.md +++ b/_faqs/what-is-inspection.md @@ -2,4 +2,8 @@ question: What is inspection? Can I disable it? --- -Inspection is used to populate hardware information in the BareMetalHost objects. You can [disable it](https://book.metal3.io/bmo/external_inspection), but you may need to populate this information yourself. Do not blindly disable inspection if it fails - chances are high the subsequent operations fail the same way. +Inspection is used to populate hardware information in the BareMetalHost +objects. You can [disable it](https://book.metal3.io/bmo/external_inspection), +but you may need to populate this information yourself. Do not blindly +disable inspection if it fails - chances are high the subsequent +operations fail the same way. diff --git a/_faqs/what-is-ipxe.md b/_faqs/what-is-ipxe.md index 8df6b4ac6..fddeee17e 100644 --- a/_faqs/what-is-ipxe.md +++ b/_faqs/what-is-ipxe.md @@ -2,4 +2,6 @@ question: What is iPXE? --- -The iPXE project develops firmware for booting machines over the network. It's a more feature-rich alternative to the well known PXE and can be used as an add-on on top of PXE. +The iPXE project develops firmware for booting machines over the +network. It's a more feature-rich alternative to the well known PXE and +can be used as an add-on on top of PXE. diff --git a/_faqs/what-is-virtual-media.md b/_faqs/what-is-virtual-media.md index d240b96a0..44768b1f6 100644 --- a/_faqs/what-is-virtual-media.md +++ b/_faqs/what-is-virtual-media.md @@ -2,4 +2,5 @@ question: What is virtual media? --- -Virtual media is a technology that allows booting an ISO on a remote machine without resorting to network boot (e.g. PXE). +Virtual media is a technology that allows booting an ISO on a remote +machine without resorting to network boot (e.g. PXE). diff --git a/_faqs/why-use-Ironic.md b/_faqs/why-use-Ironic.md index 233933269..f0cd68622 100644 --- a/_faqs/why-use-Ironic.md +++ b/_faqs/why-use-Ironic.md @@ -2,4 +2,6 @@ question: Why use Ironic? --- -Ironic is an established service with a long history of production usage and good support for industry standards. By using it, Metal3 can concentrate on providing the best integration with Kubernetes. +Ironic is an established service with a long history of production usage +and good support for industry standards. By using it, Metal3 can +concentrate on providing the best integration with Kubernetes. 
diff --git a/_includes/.markdownlint-cli2.yaml b/_includes/.markdownlint-cli2.yaml new file mode 100644 index 000000000..841c605a1 --- /dev/null +++ b/_includes/.markdownlint-cli2.yaml @@ -0,0 +1,4 @@ +# Reference: https://github.com/DavidAnson/markdownlint-cli2#markdownlint-cli2yaml + +config: + first-line-h1: false diff --git a/_includes/docs.md b/_includes/docs.md index 6ecbb496e..51105e3b5 100644 --- a/_includes/docs.md +++ b/_includes/docs.md @@ -1,9 +1,10 @@ + ## Around the Web ### Conference Talks -- [Metal³: Deploy Kubernetes on Bare Metal - Yolanda Robla - Shift Dev 2019]({% post_url 2020-01-20-metal3_deploy_kubernetes_on_bare_metal %}) -- [Introducing metal3 kubernetes native bare metal host management - Kubecon NA 2019]({% post_url 2019-12-04-Introducing_metal3_kubernetes_native_bare_metal_host_management %}) +- [Metal3: Deploy Kubernetes on Bare Metal - Yolanda Robla - Shift Dev 2019]({% post_url 2020-01-20-metal3_deploy_kubernetes_on_bare_metal %}) +- [Introducing Metal3 kubernetes native bare metal host management - Kubecon NA 2019]({% post_url 2019-12-04-Introducing_metal3_kubernetes_native_bare_metal_host_management %}) - [Extend Your Data Center to the Hybrid Edge - Red Hat Summit, May 2019]({% post_url 2019-11-13-Extend_Your_Data_Center_to_the_Hybrid_Edge-Red_Hat_Summit %}) - [OpenStack Ironic and Bare Metal Infrastructure: All Abstractions Start Somewhere - Chris Hoge, OpenStack Foundation; Julia Kreger, Red Hat]({% post_url 2019-10-31-OpenStack-Ironic-and-Bare-Metal-Infrastructure_All-Abstractions-Start-Somewhere %}) - [Kubernetes-native Infrastructure: Managed Baremetal with Kubernetes Operators and OpenStack Ironic - Steve Hardy, Red Hat]({% post_url 2019-11-07-Kubernetes-native_Infrastructure-Managed_Baremetal_with_Kubernetes_Operators_and_OpenStack_Ironic %}) @@ -13,10 +14,14 @@ - [The New Stack: Metal3 Uses OpenStack's Ironic for Declarative Bare Metal Kubernetes]({% post_url 2019-05-13-The_new_stack_Metal3_Uses_OpenStack_Ironic_for_Declarative_Bare_Metal_Kubernetes %}) - [The Register: Raise some horns: Red Hat's MetalKube aims to make Kubernetes on bare machines simple]({% post_url 2019-04-12-Raise_some_horns_Red_Hat_s_MetalKube_aims_to_make_Kubernetes_on_bare_machines_simple %}) + + ### Blog Posts -- [Metal³ Blog posts](/blog/) +- [Metal3 Blog posts](/blog/) ### Community Meetups -- Join Metal³ Team Meetups to engage in discussion with members and help with a deeper understanding of the project as well as the future discussion +- Join Metal3 Team Meetups to engage in discussion with members and help + with a deeper understanding of the project as well as the future + discussion diff --git a/_includes/privacy-statement.md b/_includes/privacy-statement.md index 42e67133f..a3f3165d1 100644 --- a/_includes/privacy-statement.md +++ b/_includes/privacy-statement.md @@ -1,15 +1,27 @@ -## Privacy Statement for the Metal³ Project +## Privacy Statement for the Metal3 Project -As Metal3.io and most of the infrastructure of the Metal³ Project are currently hosted by Red Hat Inc., this site falls under the [Red Hat Privacy Policy](https://www.redhat.com/en/about/privacy-policy){:target="\_blank"}. All terms of that privacy policy apply to this site. Should we change our hosting in the future, this Privacy Policy will be updated. +As Metal3.io and most of the infrastructure of the Metal3 Project are +currently hosted by Red Hat Inc., this site falls under the +[Red Hat Privacy Policy](https://www.redhat.com/en/about/privacy-policy){:target="\_blank"}. 
+All terms of that privacy policy apply to this site. Should we change +our hosting in the future, this Privacy Policy will be updated. ### How to Contact Us -If you have any questions about any of these practices or Metal³'s use of your personal information, please feel free to [contact us](mailto:privacy@metal3.io) or [file an Issue](https://github.com/metal3-io/metal3-io.github.io/issues){:target="\_blank"} in our GitHub repo. +If you have any questions about any of these practices or Metal3's use +of your personal information, please feel free to [contact +us](mailto:privacy@metal3.io) or [file an +Issue](https://github.com/metal3-io/metal3-io.github.io/issues){:target="\_blank"} +in our GitHub repo. -Metal³ will work with you to resolve any concerns you may have about this Statement. +Metal3 will work with you to resolve any concerns you may have about +this Statement. ### Changes to this Privacy Statement -Metal³ reserves the right to change this policy from time to time. If we do make changes, the revised Privacy Statement will be posted on this site. A notice will be posted on our blog and/or mailing lists whenever this privacy statement is changed in a material way. +Metal3 reserves the right to change this policy from time to time. If we +do make changes, the revised Privacy Statement will be posted on this +site. A notice will be posted on our blog and/or mailing lists whenever +this privacy statement is changed in a material way. This Privacy Statement was last amended on September 25, 2019. diff --git a/_posts/.markdownlint-cli2.yaml b/_posts/.markdownlint-cli2.yaml new file mode 100644 index 000000000..f496a3058 --- /dev/null +++ b/_posts/.markdownlint-cli2.yaml @@ -0,0 +1,5 @@ +# Reference: https://github.com/DavidAnson/markdownlint-cli2#markdownlint-cli2yaml + +config: + line-length: false + first-line-h1: false diff --git a/_posts/2019-04-12-Raise_some_horns_Red_Hat_s_MetalKube_aims_to_make_Kubernetes_on_bare_machines_simple.md b/_posts/2019-04-12-Raise_some_horns_Red_Hat_s_MetalKube_aims_to_make_Kubernetes_on_bare_machines_simple.md index fa89237a0..ba37f87e2 100644 --- a/_posts/2019-04-12-Raise_some_horns_Red_Hat_s_MetalKube_aims_to_make_Kubernetes_on_bare_machines_simple.md +++ b/_posts/2019-04-12-Raise_some_horns_Red_Hat_s_MetalKube_aims_to_make_Kubernetes_on_bare_machines_simple.md @@ -16,31 +16,61 @@ categories: author: Pedro Ibáñez Requena --- + + ## The Register; Raise some horns: Red Hat's Metal³ aims to make Kubernetes on bare machines simple -[Max Smolaks](https://www.theregister.co.uk/Author/Max-Smolaks) talks in this article about the OpenInfra Days in the UK, 2019: where Metal³ was revealed earlier last week by Steve Hardy, Red Hat's senior principal software engineer. The Open Infrastructure Days in the UK is an event organised by the local Open Infrastructure community and supported by the OpenStack Foundation. -The Open-source software developers at Red Hat are working on a tool that would simplify the deployment and management of Kubernetes clusters on bare-metal servers. + + +[Max Smolaks](https://www.theregister.co.uk/Author/Max-Smolaks) talks in +this article about the OpenInfra Days in the UK, 2019: where Metal³ was +revealed earlier last week by Steve Hardy, Red Hat's senior principal +software engineer. The Open Infrastructure Days in the UK is an event +organised by the local Open Infrastructure community and supported by +the OpenStack Foundation. 
The Open-source software developers at Red Hat +are working on a tool that would simplify the deployment and management +of Kubernetes clusters on bare-metal servers. Steve told The Register: -> "In some situations, you won't want to run a full OpenStack infrastructure-as-a-service layer to provide, potentially, for multiple Kubernetes clusters". +> "In some situations, you won't want to run a full OpenStack +> infrastructure-as-a-service layer to provide, potentially, for +> multiple Kubernetes clusters". -Hardy is a notable contributor to OpenStack, having previously worked on Heat and TripleO projects. He said one of the reasons for choosing Ironic was its active development – and when new features get added to Ironic, the Metal³ team gets them "for free". +Hardy is a notable contributor to OpenStack, having previously worked on +Heat and TripleO projects. He said one of the reasons for choosing +Ironic was its active development – and when new features get added to +Ironic, the Metal³ team gets them "for free". -> "OpenStack has always been a modular set of projects, and people have always had the opportunity to reuse components for different applications. This is just an example of where we are leveraging one particular component for infrastructure management, just as an alternative to using a full infrastructure API," Hardy said. +> "OpenStack has always been a modular set of projects, and people have +> always had the opportunity to reuse components for different +> applications. This is just an example of where we are leveraging one +> particular component for infrastructure management, just as an +> alternative to using a full infrastructure API," Hardy said. -Thierry Carrez, veep of engineering at the OpenStack Foundation also told The Register: +Thierry Carrez, veep of engineering at the OpenStack Foundation also told +The Register: -> "I like the fact that the projects end up being reusable on their own, for the functions they bring to the table – this helps us integrate with adjacent communities". +> "I like the fact that the projects end up being reusable on their own, +> for the functions they bring to the table – this helps us integrate +> with adjacent communities". Hardy also commented: -> It's still early days for Metal³ - the project has just six contributors, and there's no telling when it might reach release. "It's a very, very young project but we are keen to get more community participation and feedback,". +> It's still early days for Metal³ - the project has just six +> contributors, and there's no telling when it might reach release. +> "It's a very, very young project but we are keen to get more community +> participation and feedback,". -For further detail, check out the full article at [The Register: Raise some horns: Red Hat's MetalKube aims to make Kubernetes on bare machines simple](https://www.theregister.co.uk/2019/04/05/red_hat_metalkubel/). +For further detail, check out the full article at +[The Register: Raise some horns: Red Hat's MetalKube aims to make Kubernetes on +bare machines simple](https://www.theregister.co.uk/2019/04/05/red_hat_metalkubel/) +. ## References -- [Steve Hardy](https://hardysteven.blogspot.com): Red Hat's senior principal software engineer. -- [Thierry Carrez](https://ttx.re/): veep of engineering at the OpenStack Foundation. +- [Steve Hardy](https://hardysteven.blogspot.com): Red Hat's senior + principal software engineer. +- [Thierry Carrez](https://ttx.re/): veep of engineering at the + OpenStack Foundation. 
- [The Register: Raise some horns: Red Hat's MetalKube aims to make Kubernetes on bare machines simple](https://www.theregister.co.uk/2019/04/05/red_hat_metalkubel/) diff --git a/_posts/2019-05-13-The_new_stack_Metal3_Uses_OpenStack_Ironic_for_Declarative_Bare_Metal_Kubernetes.md b/_posts/2019-05-13-The_new_stack_Metal3_Uses_OpenStack_Ironic_for_Declarative_Bare_Metal_Kubernetes.md index 14ba24b4b..fd9d1a0af 100644 --- a/_posts/2019-05-13-The_new_stack_Metal3_Uses_OpenStack_Ironic_for_Declarative_Bare_Metal_Kubernetes.md +++ b/_posts/2019-05-13-The_new_stack_Metal3_Uses_OpenStack_Ironic_for_Declarative_Bare_Metal_Kubernetes.md @@ -28,7 +28,7 @@ the new project that provides “bare metal host provisioning integration for Ku Some words from Kreger in an interview with The New Stack: > “I think the bigger trend that we’re starting to see is a recognition that common tooling and substrate helps everyone succeed faster with more efficiency.” - +> > “This is combined with a shift in the way operators are choosing to solve their problems at scale, specifically in regards to isolation, cost, or performance.” For further detail, check out the [video of the keynote]({% post_url 2019-10-31-OpenStack-Ironic-and-Bare-Metal-Infrastructure_All-Abstractions-Start-Somewhere %}), which includes a demonstration of Metal3 being used to quickly provision three bare metal servers with Kubernetes diff --git a/_posts/2019-06-25-Metal3.md b/_posts/2019-06-25-Metal3.md index 04608dbab..3e1e02186 100644 --- a/_posts/2019-06-25-Metal3.md +++ b/_posts/2019-06-25-Metal3.md @@ -135,7 +135,7 @@ spec: but also: - [Machine type objects](https://github.com/kubernetes-sigs/cluster-api/blob/60933cb23498d0621f57454c208fc3a8d6e18bf2/api/v1alpha2/machine_types.go) -- [MachineSet type objects][(https://github.com/kubernetes-sigs/cluster-api/blob/master/pkg/apis/cluster/v1alpha1/machineset_types.go](https://github.com/kubernetes-sigs/cluster-api/blob/60933cb23498d0621f57454c208fc3a8d6e18bf2/api/v1alpha2/machineset_types.go)) +- [MachineSet type objects](https://github.com/kubernetes-sigs/cluster-api/blob/master/pkg/apis/cluster/v1alpha1/machineset_types.go) - [MachineDeployment type objects](https://github.com/kubernetes-sigs/cluster-api/blob/60933cb23498d0621f57454c208fc3a8d6e18bf2/api/v1alpha2/machinedeployment_types.go) - [etc](https://github.com/kubernetes-sigs/cluster-api/tree/60933cb23498d0621f57454c208fc3a8d6e18bf2/api/v1alpha2) diff --git a/_posts/2019-10-31-OpenStack-Ironic-and-Bare-Metal-Infrastructure_All-Abstractions-Start-Somewhere.md b/_posts/2019-10-31-OpenStack-Ironic-and-Bare-Metal-Infrastructure_All-Abstractions-Start-Somewhere.md index ad7e4ada9..257407e67 100644 --- a/_posts/2019-10-31-OpenStack-Ironic-and-Bare-Metal-Infrastructure_All-Abstractions-Start-Somewhere.md +++ b/_posts/2019-10-31-OpenStack-Ironic-and-Bare-Metal-Infrastructure_All-Abstractions-Start-Somewhere.md @@ -12,8 +12,12 @@ The history of cloud computing has rapidly layered abstractions on abstractions In this video, Chris and Julia show how OpenStack Ironic is a solution to the problem of managing bare-metal infrastructure. + + + + ## Speakers [Chris Hoge](https://twitter.com/hogepodge) is a Senior Strategic Program Manager for the OpenStack foundation. He's been an active contributor to the Interop Working Group (formerly DefCore) and helps run the trademark program for the OpenStack Foundation. He also works on collaborations between the OpenStack and Kubernetes communities. 
Previously he worked as an OpenStack community manager and developer at Puppet Labs and operated a research cloud for the College of Arts and Sciences at The University of Oregon. When not cloud computing, he enjoys long-distance running, dancing, and throwing a ball for his Border Collie. diff --git a/_posts/2019-12-04-Introducing_metal3_kubernetes_native_bare_metal_host_management.md b/_posts/2019-12-04-Introducing_metal3_kubernetes_native_bare_metal_host_management.md index dc20877bc..21b6076ac 100644 --- a/_posts/2019-12-04-Introducing_metal3_kubernetes_native_bare_metal_host_management.md +++ b/_posts/2019-12-04-Introducing_metal3_kubernetes_native_bare_metal_host_management.md @@ -12,8 +12,12 @@ Metal³ (`metal cubed/Kube`) is a new open-source bare metal host provisioning t In this video, Russell Bryant and Doug Hellmann speak about the what's and how's of Metal³, a new tool that enables the management of bare metal hosts via custom resources managed through the Kubernetes API. + + + + ## Speakers [Russell Bryant](http://www.russellbryant.net/) Russell Bryant is a Distinguished Engineer at Red Hat, where he works on infrastructure management to support Kubernetes clusters. Prior to working on the Metal³ project, Russell worked on other open infrastructure projects. Russell worked in Software Defined Networking with Open vSwitch (OVS) and Open Virtual Network (OVN) and worked on various parts of OpenStack. Russell also worked in open source telephony via the Asterisk project. diff --git a/_posts/2020-01-20-metal3_deploy_kubernetes_on_bare_metal.md b/_posts/2020-01-20-metal3_deploy_kubernetes_on_bare_metal.md index 5b65a0eb1..eb01b537b 100644 --- a/_posts/2020-01-20-metal3_deploy_kubernetes_on_bare_metal.md +++ b/_posts/2020-01-20-metal3_deploy_kubernetes_on_bare_metal.md @@ -12,8 +12,12 @@ Some of the most influential minds in the developer industry were landing in the In this video, Yolanda Robla speaks about the deployment of Kubernetes on Bare Metal with the help of Metal³, a new tool that enables the management of bare metal hosts via custom resources managed through the Kubernetes API. + + + + ## Speakers [Yolanda Robla](https://www.linkedin.com/in/yolanda-robla-2008158/) Yolanda Robla is a Principal Software Engineer at Red Hat. In her own words: diff --git a/_posts/2020-02-18-metal3-dev-env-install-deep-dive.md b/_posts/2020-02-18-metal3-dev-env-install-deep-dive.md index 82aed6981..b777c4976 100644 --- a/_posts/2020-02-18-metal3-dev-env-install-deep-dive.md +++ b/_posts/2020-02-18-metal3-dev-env-install-deep-dive.md @@ -9,38 +9,74 @@ author: Alberto Losada ## **Introduction to metal3-dev-env** -The [metal3-dev-env](https://github.com/metal3-io/metal3-dev-env) is a collection of scripts in a GitHub repository inside the [Metal³](https://github.com/metal3-io?type=source) project that aims to allow contributors and other interested users to run a fully functional Metal³ environment for testing and have a first contact with the project. Actually, `metal3-dev-env` sets up an **emulated environment** which creates a set of virtual machines (VMs) to manage as if they were bare metal hosts. +The [metal3-dev-env](https://github.com/metal3-io/metal3-dev-env) is a +collection of scripts in a GitHub repository inside the +[Metal³](https://github.com/metal3-io?type=source) project that aims to +allow contributors and other interested users to run a fully functional +Metal³ environment for testing and have a first contact with the +project. 
Actually, `metal3-dev-env` sets up an **emulated environment** +which creates a set of virtual machines (VMs) to manage as if they were +bare metal hosts. > warning "Warning" -> This is not an installation that is supposed to be run in production. Instead, it is focused on providing a development environment to test and validate new features. +> This is not an installation that is supposed to be run in production. +> Instead, it is focused on providing a development environment to test +> and validate new features. -The `metal3-dev-env` repository includes a set of scripts, libraries and resources used to set up a Metal³ development environment. On the [Metal³ website](https://metal3.io/try-it.html) there is already a documented process on how to use the `metal3-dev-env` scripts to set up a fully functional cluster to test the functionality of the Metal³ components. +The `metal3-dev-env` repository includes a set of scripts, libraries and +resources used to set up a Metal³ development environment. On the +[Metal³ website](https://metal3.io/try-it.html) there is already a +documented process on how to use the `metal3-dev-env` scripts to set up +a fully functional cluster to test the functionality of the Metal³ +components. -This procedure at a 10,000-foot view is composed of 3 bash scripts plus a verification one: +This procedure at a 10,000-foot view is composed of 3 bash scripts plus +a verification one: - **01_prepare_host.sh** - Mainly installs all needed packages. -- **02_configure_host.sh** - Basically create a set of VMs that will be managed as if they were bare metal hosts. It also downloads some images needed for Ironic. -- **03_launch_mgmt_cluster.sh** - Launches a management cluster using minikube and runs the baremetal-operator on that cluster. -- **04_verify.sh** - Finally runs a set of tests that verify that the deployment was completed successfully +- **02_configure_host.sh** - Basically create a set of VMs that will be + managed as if they were bare metal hosts. It also downloads some + images needed for Ironic. +- **03_launch_mgmt_cluster.sh** - Launches a management cluster using + minikube and runs the baremetal-operator on that cluster. +- **04_verify.sh** - Finally runs a set of tests that verify that the + deployment was completed successfully -In this blog post, we are going to expand the information and provide some hints and recommendations. +In this blog post, we are going to expand the information and provide +some hints and recommendations. > warning "Warning" -> Metal³ project is changing rapidly, so probably this information is valuable in the short term. In any case, it is encouraged to double-check that the information provided is still valid. +> Metal³ project is changing rapidly, so probably this information is +> valuable in the short term. In any case, it is encouraged to +> double-check that the information provided is still valid. Before getting down to it, it is worth defining the nomenclature used in the blog post: -- **Host.** It is the server where the virtual environment is running. In this case, it is a physical PowerEdge M520 with 2 x Intel(R) Xeon(R) CPU E5-2450 v2 @ 2.50GHz, 96GB RAM and a 140GB drive running CentOS 7 latest. Do not panic, lab environment should work with lower resources as well. -- **Virtual bare metal hosts.** These are the virtual machines (KVM based) that are running on the host which are emulating physical hosts in our lab. They are also called bare metal hosts even if they are not physical servers. 
-- **Management or bootstrap cluster.** It is a fully functional Kubernetes cluster in charge of running all the necessary Metal³ operators and controllers to manage the infrastructure. In this case it is the minikube virtual machine. -- **Target cluster.** It is the Kubernetes cluster created from the management one. It is provisioned and configured using a native Kubernetes API for that purpose. +- **Host.** It is the server where the virtual environment is running. + In this case, it is a physical PowerEdge M520 with 2 x Intel(R) + Xeon(R) CPU E5-2450 v2 @ 2.50GHz, 96GB RAM and a 140GB drive running + CentOS 7 latest. Do not panic, lab environment should work with lower + resources as well. +- **Virtual bare metal hosts.** These are the virtual machines (KVM + based) that are running on the host which are emulating physical hosts + in our lab. They are also called bare metal hosts even if they are not + physical servers. +- **Management or bootstrap cluster.** It is a fully functional + Kubernetes cluster in charge of running all the necessary Metal³ + operators and controllers to manage the infrastructure. In this case + it is the minikube virtual machine. +- **Target cluster.** It is the Kubernetes cluster created from the + management one. It is provisioned and configured using a native + Kubernetes API for that purpose. ## **Create the Metal³ laboratory** > info "Information" -> A non-root user must exist in the host with password-less sudo access. This user is in charge of running the `metal3-dev-env` scripts. +> A non-root user must exist in the host with password-less sudo access. +> This user is in charge of running the `metal3-dev-env` scripts. -The first thing that needs to be done is, obviously, cloning the `metal3-dev-env` repository: +The first thing that needs to be done is, obviously, cloning the +`metal3-dev-env` repository: ```sh [alosadag@eko1: ~]$ git clone https://github.com/metal3-io/metal3-dev-env.git @@ -53,7 +89,10 @@ Receiving objects: 100% (1660/1660), 446.08 KiB | 678.00 KiB/s, done. Resolving deltas: 100% (870/870), done. ``` -Before starting to deploy the Metal³ environment, it makes sense to detail a series of scripts inside the library folder that will be sourced in every step of the installation process. They are called _shared libraries_. +Before starting to deploy the Metal³ environment, it makes sense to +detail a series of scripts inside the library folder that will be +sourced in every step of the installation process. They are called +_shared libraries_. ```sh [alosadag@eko1:~]$ ls -1 metal3-dev-env/lib/ @@ -65,21 +104,35 @@ network.sh ### Shared libraries -Although there are several scripts placed inside the lib folder that are sourced in some of the deployment steps, `common.sh` and `logging.sh` are the only ones used in all of the executions during the installation process. +Although there are several scripts placed inside the lib folder that are +sourced in some of the deployment steps, `common.sh` and `logging.sh` +are the only ones used in all of the executions during the installation +process. #### **common.sh** -The first time this library is run, a new configuration file is created with several variables along with their default values. They will be used during the installation process. On the other hand, if the file already exists, then it just sources the values configured. The configuration file is created inside the cloned folder with `config_$USER` as the file name. 
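As the note further down also recommends, overrides are best persisted in that per-user config file rather than exported ad hoc in the shell. A minimal, hypothetical sketch (assuming the usual `export VAR=value` form; the variable names are ones that appear later in this walkthrough):

```sh
# Hypothetical overrides appended to the generated per-user config file;
# CONTAINER_RUNTIME and NUMBER_NODES are variables covered later in this post
cat >> "config_${USER}.sh" <<'EOF'
export CONTAINER_RUNTIME="podman"
export NUMBER_NODES=2
EOF
```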
+The first time this library is run, a new configuration file is created +with several variables along with their default values. They will be +used during the installation process. On the other hand, if the file +already exists, then it just sources the values configured. The +configuration file is created inside the cloned folder with +`config_$USER` as the file name. ```sh [alosadag@eko1 metal3-dev-env]$ ls config_* config_alosadag.sh ``` -The configuration file contains multiple variables that will be used during the set-up. Some of them are detailed [in the setup section of the Metal³ try-it web page](https://metal3.io/try-it.html#setup). In case you need to add or change global variables it should be done in this config file. +The configuration file contains multiple variables that will be used +during the set-up. Some of them are detailed [in the setup section of +the Metal³ try-it web page](https://metal3.io/try-it.html#setup). In +case you need to add or change global variables it should be done in +this config file. > note "Note" -> I personally recommend modifying or adding variables in this config file instead of exporting them in the shell. By doing that, it is assured that they are persisted +> I personally recommend modifying or adding variables in this config +> file instead of exporting them in the shell. By doing that, it is +> assured that they are persisted ```sh [alosadag@eko1 metal3-dev-env]$ cat ~/metal3-dev-env/config_alosadag.sh @@ -101,9 +154,15 @@ The configuration file contains multiple variables that will be used during the ... ``` -This `common.sh` library also makes sure there is an ssh public key available in the user's ssh folder. This key will be injected by `cloud-init` in all the virtual bare metal machines that will be configured later. Then, the user that executed the `metal3-dev-env` scripts is able to access the target cluster through ssh. +This `common.sh` library also makes sure there is an ssh public key +available in the user's ssh folder. This key will be injected by +`cloud-init` in all the virtual bare metal machines that will be +configured later. Then, the user that executed the `metal3-dev-env` +scripts is able to access the target cluster through ssh. -Also, `common.sh` library also sets more global variables apart from those in the config file. Note that these variables can be added to the config file along with the proper values for your environment. +Also, `common.sh` library also sets more global variables apart from +those in the config file. Note that these variables can be added to the +config file along with the proper values for your environment. | **Name of the variable** | **Default value** | | ------------------------ | --------------------------------------------------------- | @@ -127,11 +186,14 @@ Also, `common.sh` library also sets more global variables apart from those in th | KUSTOMIZE_VERSION | v3.2.3 | > info "Information" -> It is important to mention that there are several basic functions defined in this file that will be used by the rest of scripts. +> It is important to mention that there are several basic functions +> defined in this file that will be used by the rest of scripts. #### **logging.sh** -This script ensures that there is a log folder where all the information gathered during the execution of the scripts is stored. If there is any issue during the deployment, this is one of the first places to look at. 
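As a practical aside, a hypothetical one-liner to follow the most recent of those log files while a step is running (assuming the `logs/` layout shown just below):

```sh
# Tail the newest log file under logs/ while a deployment step runs
tail -f "$(ls -t logs/*.log | head -n 1)"
```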
+This script ensures that there is a log folder where all the information +gathered during the execution of the scripts is stored. If there is any +issue during the deployment, this is one of the first places to look at. ```sh [alosadag@eko1 metal3-dev-env]$ ls -1 logs/ @@ -142,65 +204,103 @@ host_cleanup-2020-02-03-122656.log ## **First step: Prepare the host** -In this first step (`01_prepare_host.sh`), the requirements needed to start the preparation of the host where the virtual bare metal hosts will run are fulfilled. Depending on the host's operating system (OS), it will trigger a specific script for `CentOS/Red Hat` or `Ubuntu`. +In this first step (`01_prepare_host.sh`), the requirements needed to +start the preparation of the host where the virtual bare metal hosts +will run are fulfilled. Depending on the host's operating system (OS), +it will trigger a specific script for `CentOS/Red Hat` or `Ubuntu`. > note: "Note" -> Currently `CentOS Linux 7`, `Red Hat Enterprise Linux 8` and `Ubuntu` have been tested. [There is work in progress to adapt the deployment for CentOS Linux 8.](https://github.com/metal3-io/metal3-dev-env/pull/157) +> Currently `CentOS Linux 7`, `Red Hat Enterprise Linux 8` and `Ubuntu` +> have been tested. [There is work in progress to adapt the deployment +> for CentOS Linux +> 8.](https://github.com/metal3-io/metal3-dev-env/pull/157) -As stated previously, `CentOS 7` is the operating system chosen to run in both, the host and virtual servers. Therefore, specific packages of the operating system are applied in the following script: +As stated previously, `CentOS 7` is the operating system chosen to run +in both, the host and virtual servers. Therefore, specific packages of +the operating system are applied in the following script: > - **centos_install_requirements.sh** - -> This script enables `epel` and `tripleo` (current-tripleo) repositories where several packages are installed: `dnf`, `ansible`, `wget`, `python3` and python related packages such as `python-virtualbmc` from tripleo repository. - +> This script enables `epel` and `tripleo` (current-tripleo) +> repositories where several packages are installed: `dnf`, `ansible`, +> `wget`, `python3` and python related packages such as +> `python-virtualbmc` from tripleo repository. > note "Note" -> Notice that _SELinux_ is set to _permissive_ and an OS update is triggered, which will cause several packages to be upgraded since there are newer packages in the tripleo repositories (mostly python related) than in the rest of enabled repositories. -> At this point, the container runtime is also installed. Note that by setting the variable `CONTAINER_RUNTIME` defined in [common.sh](#commonsh) is possible to choose between docker and podman, which is the default for CentOS. Remember that this behaviour can be overwritten in your config file. - -Once the specific requirements for the elected operating system are accomplished, the download of several external artifacts is executed. Actually _minikube_, _kubectl_ and _kustomize_ are downloaded from the internet. Notice that the version of Kustomize and Kubernetes is defined by `KUSTOMIZE_VERSION` and `KUBERNETES_VERSION` variables inside [common.sh](#commonsh), but minikube is always downloading the latest version available. - -The next step deals with cleaning ironic containers and **pods** that could be running in the host from failed deployments. 
This will ensure that there will be no issues when creating `ironic-pod` and `infra-pod` a little bit later in this first step. +> Notice that _SELinux_ is set to _permissive_ and an OS update is +> triggered, which will cause several packages to be upgraded since +> there are newer packages in the tripleo repositories (mostly python +> related) than in the rest of enabled repositories. At this point, the +> container runtime is also installed. Note that by setting the variable +> `CONTAINER_RUNTIME` defined in [common.sh](#commonsh) is possible to +> choose between docker and podman, which is the default for CentOS. +> Remember that this behaviour can be overwritten in your config file. + +Once the specific requirements for the elected operating system are +accomplished, the download of several external artifacts is executed. +Actually _minikube_, _kubectl_ and _kustomize_ are downloaded from the +internet. Notice that the version of Kustomize and Kubernetes is defined +by `KUSTOMIZE_VERSION` and `KUBERNETES_VERSION` variables inside +[common.sh](#commonsh), but minikube is always downloading the latest +version available. + +The next step deals with cleaning ironic containers and **pods** that +could be running in the host from failed deployments. This will ensure +that there will be no issues when creating `ironic-pod` and `infra-pod` +a little bit later in this first step. > - **network.sh.** - -> At this point, the network library script is sourced. As expected, this library deals with the network configuration which includes: IP addresses, network definitions and IPv6 support which is disabled by default by setting `PROVISIONING_IPV6` variable: > -> > - +> At this point, the network library script is sourced. As expected, +> this library deals with the network configuration which includes: IP +> addresses, network definitions and IPv6 support which is disabled by +> default by setting `PROVISIONING_IPV6` variable: +> > | Name of the variable | Default value | Option | > | -------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------- | > | PROVISIONING_NETWORK | 172.22.0.0/24 | This is the subnet used to run the OS provisioning process | > | EXTERNAL_SUBNET | 192.168.111.0/24 | This is the subnet used on the "baremetal" libvirt network, created as the primary network interface for the virtual bare metal hosts | -> | LIBVIRT_FIRMWARE | bios | -> | PROVISIONING_IPV6 | false | - -> Below it is depicted a network diagram of the different virtual networks and virtual servers involved in the Metal³ environment: +> | LIBVIRT_FIRMWARE | bios | | +> | PROVISIONING_IPV6 | false | | +> +> Below it is depicted a network diagram of the different virtual +> networks and virtual servers involved in the Metal³ environment: ![metal³ dev env virtual networking](/assets/2020-02-18-metal3-dev-env-install-deep-dive/metal3-dev-env.resized.png) > - **images.sh.** - -> The `images.sh` library file is sourced as well in script `01_prepare_host.sh`. The `images.sh` script contains multiple variables that set the URL (`IMAGE_LOCATION`), name (`IMAGE_NAME`) and default username (`IMAGE_USERNAME`) of the cloud image that needs to be downloaded. The values of each variable will differ depending on the operating system of the virtual bare metal hosts. Note that these images will be served from the host to the virtual servers through the provisioning network. 
- -> In our case, since `CentOS 7` is the base operating system, values will be defined as: > -> > - +> The `images.sh` library file is sourced as well in script +> `01_prepare_host.sh`. The `images.sh` script contains multiple +> variables that set the URL (`IMAGE_LOCATION`), name (`IMAGE_NAME`) and +> default username (`IMAGE_USERNAME`) of the cloud image that needs to +> be downloaded. The values of each variable will differ depending on +> the operating system of the virtual bare metal hosts. Note that these +> images will be served from the host to the virtual servers through the +> provisioning network. +> +> In our case, since `CentOS 7` is the base operating system, values +> will be defined as: +> > | **Name of the variable** | **Default value** | > | ------------------------ | --------------------------------------- | > | IMAGE_NAME | CentOS-7-x86_64-GenericCloud-1907.qcow2 | -> | IMAGE_LOCATION | http://cloud.centos.org/centos/7/images | +> | IMAGE_LOCATION | [http://cloud.centos.org/centos/7/images](http://cloud.centos.org/centos/7/images) | > | IMAGE USERNAME | centos | - +> > info "Information" -> In case it is expected to use a custom cloud image, just modify the previous variables to match the right location. +> In case it is expected to use a custom cloud image, just modify the +> previous variables to match the right location. -Now that the cloud image is defined, the download process can be started. First, a folder defined by `IRONIC_IMAGE_DIR` should exist so that the image (`CentOS-7-x86_64-GenericCloud-1907.qcow2`) and its checksum can be stored. This folder and its content will be exposed through a local `ironic` container running in the host. +Now that the cloud image is defined, the download process can be +started. First, a folder defined by `IRONIC_IMAGE_DIR` should exist so +that the image (`CentOS-7-x86_64-GenericCloud-1907.qcow2`) and its +checksum can be stored. This folder and its content will be exposed +through a local `ironic` container running in the host. | **Name of the variable** | **Default value** | | IRONIC_IMAGE_DIR | /opt/metal3-dev-env/ironic/html/images | -Below it is verified that the cloud image files were downloaded successfully in the defined folder: +Below it is verified that the cloud image files were downloaded +successfully in the defined folder: ```sh [alosadag@eko1 metal3-dev-env]$ ll /opt/metal3-dev-env/ironic/html/images @@ -209,7 +309,9 @@ total 920324 -rw-rw-r--. 1 alosadag alosadag 33 Feb 3 12:39 CentOS-7-x86_64-GenericCloud-1907.qcow2.md5sum ``` -Once the shared script `images.sh` is sourced, the following container images are pre-cached locally to the host in order to speed up things later. Below is shown the code snippet in charge of that task: +Once the shared script `images.sh` is sourced, the following container +images are pre-cached locally to the host in order to speed up things +later. Below is shown the code snippet in charge of that task: ```sh + for IMAGE_VAR in IRONIC_IMAGE IPA_DOWNLOADER_IMAGE VBMC_IMAGE SUSHY_TOOLS_IMAGE DOCKER_REGISTRY_IMAGE @@ -230,9 +332,14 @@ The container image location of each one is defined by their respective variable | DOCKER_REGISTRY_IMAGE | docker.io/registry:latest | > info "Information" -> In case it is expected to modify the public container images to test new features, it is worth mentioning that there is a container registry running as a privileged container in the host. 
Therefore it is recommended to upload your modified images there and just overwrite the previous variables to match the right location. +> In case it is expected to modify the public container images to test +> new features, it is worth mentioning that there is a container +> registry running as a privileged container in the host. Therefore it +> is recommended to upload your modified images there and just overwrite +> the previous variables to match the right location. -At this point, an Ansible role is run locally in order to complete the local configuration. +At this point, an Ansible role is run locally in order to complete the +local configuration. ```sh ansible-playbook \ @@ -242,17 +349,39 @@ ansible-playbook \ -b -vvv vm-setup/install-package-playbook.yml ``` -This playbook imports two roles. One is called `packages_installation`, which is in charge of installing a few more packages. The list of packages installed are listed as default Ansible variables [in the vm-setup role inside the metal3-dev-env repository](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/roles/packages_installation/defaults/main.yml). The other role is based on the [fubarhouse.golang](https://galaxy.ansible.com/fubarhouse/golang) Ansible Galaxy role. It is in charge of installing and configuring the exact `golang` version `1.12.12` defined in an Ansible variable in the [install-package-playbook.yml playbook](https://github.com/metal3-io/metal3-dev-env/blob/9fa752b90ed58fdadcd52c246d3023766dfcb2dc/vm-setup/install-package-playbook.yml#L12) - -Once the playbook is finished, a pod called `ironic-pod` is created. Inside that pod, a _privileged_ `ironic-ipa-downloader` container is started and attached to the host network. This container is in charge of downloading the [Ironic Python Agent](https://docs.openstack.org/ironic-python-agent/latest/) (IPA) files to a shared volume defined by `IRONIC_IMAGE_DIR`. This folder is exposed by the `ironic` container through HTTP. +This playbook imports two roles. One is called `packages_installation`, +which is in charge of installing a few more packages. The list of +packages installed are listed as default Ansible variables [in the +vm-setup role inside the metal3-dev-env +repository](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/roles/packages_installation/defaults/main.yml). +The other role is based on the +[fubarhouse.golang](https://galaxy.ansible.com/fubarhouse/golang) +Ansible Galaxy role. It is in charge of installing and configuring the +exact `golang` version `1.12.12` defined in an Ansible variable in the +[install-package-playbook.yml +playbook](https://github.com/metal3-io/metal3-dev-env/blob/9fa752b90ed58fdadcd52c246d3023766dfcb2dc/vm-setup/install-package-playbook.yml#L12) + +Once the playbook is finished, a pod called `ironic-pod` is created. +Inside that pod, a _privileged_ `ironic-ipa-downloader` container is +started and attached to the host network. This container is in charge of +downloading the [Ironic Python +Agent](https://docs.openstack.org/ironic-python-agent/latest/) (IPA) +files to a shared volume defined by `IRONIC_IMAGE_DIR`. This folder is +exposed by the `ironic` container through HTTP. > info "Information" -> The [Ironic Python Agent](https://docs.openstack.org/ironic-python-agent/latest/) is an agent for controlling and deploying Ironic controlled baremetal nodes. Typically run in a ramdisk, the agent exposes a REST API for provisioning servers. 
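While that download is in progress, a couple of hypothetical spot-checks can be handy; the container name and the image directory are the ones that appear in the snippets just below:

```sh
# Follow the IPA download as it progresses
sudo podman logs -f ipa-downloader
# Check which files have landed in the shared image directory so far
ls -l /opt/metal3-dev-env/ironic/html/images/
```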
+> The [Ironic Python +> Agent](https://docs.openstack.org/ironic-python-agent/latest/) is an +> agent for controlling and deploying Ironic controlled baremetal nodes. +> Typically run in a ramdisk, the agent exposes a REST API for +> provisioning servers. See below the code snippet that fulfils the task: ```sh -sudo podman run -d --net host --privileged --name ipa-downloader --pod ironic-pod -e IPA_BASEURI= -v /opt/metal3-dev-env/ironic:/shared quay.io/metal3-io/ironic-ipa-downloader /usr/local/bin/get-resource.sh +sudo podman run -d --net host --privileged --name ipa-downloader \ + --pod ironic-pod -e IPA_BASEURI= -v /opt/metal3-dev-env/ironic:/shared \ + quay.io/metal3-io/ironic-ipa-downloader /usr/local/bin/get-resource.sh ``` Below is shown the status of the pods and containers at this point: @@ -263,7 +392,9 @@ POD ID NAME STATUS CREATED CONTAINER INFO 5a0d475351aa ironic-pod Running 6 days ago [5a0d475351aa-infra] [ipa-downloader] 18f3a8f61407 ``` -The process will wait until the `ironic-python-agent` (IPA) initramfs, kernel and headers files are downloaded successfully. See below the files downloaded along with the `CentOS 7` cloud image: +The process will wait until the `ironic-python-agent` (IPA) initramfs, +kernel and headers files are downloaded successfully. See below the +files downloaded along with the `CentOS 7` cloud image: ```sh [alosadag@eko1 metal3-dev-env]$ ll /opt/metal3-dev-env/ironic/html/images @@ -276,7 +407,11 @@ lrwxrwxrwx. 1 root root 69 Feb 3 12:41 ironic-python-agent.kerne lrwxrwxrwx. 1 root root 74 Feb 3 12:41 ironic-python-agent.tar.headers -> ironic-python-agent-1862d000-59d9fdc6304b1/ironic-python-agent.tar.headers ``` -Afterwards, the script makes sure that libvirt is running successfully on the host and that the non-privileged user has permission to interact with it. Libvirt daemon should be running so that minikube can be installed successfully. See the following script snippet starting the minikube VM: +Afterwards, the script makes sure that libvirt is running successfully +on the host and that the non-privileged user has permission to interact +with it. Libvirt daemon should be running so that minikube can be +installed successfully. See the following script snippet starting the +minikube VM: ```sh + sudo su -l -c 'minikube start --insecure-registry 192.168.111.1:5000' @@ -284,15 +419,24 @@ Afterwards, the script makes sure that libvirt is running successfully on the ho * Selecting 'kvm2' driver from user configuration (alternates: [none]) ``` -In the same way, as with the host, container images are pre-cached but in this case inside minikube local image repository. Notice that in this case the [Bare Metal operator](https://github.com/metal3-io/baremetal-operator/) (BMO) is also downloaded since it will run on minikube. The container location is defined by `BAREMETAL_OPERATOR_IMAGE`. In case you want to test new features or new fixes to the BMO, just change the value of the variable to match the location of the modified image: +In the same way, as with the host, container images are pre-cached but +in this case inside minikube local image repository. Notice that in this +case the [Bare Metal +operator](https://github.com/metal3-io/baremetal-operator/) (BMO) is +also downloaded since it will run on minikube. The container location is +defined by `BAREMETAL_OPERATOR_IMAGE`. 
In case you want to test new +features or new fixes to the BMO, just change the value of the variable +to match the location of the modified image: | **Name of the variable** | **Default value** | | BAREMETAL_OPERATOR_IMAGE | quay.io/metal3-io/baremetal-operator | > note "Note" -> Remember that minikube is the management cluster in our environment. So it must run all the operators and controllers needed for Metal³. +> Remember that minikube is the management cluster in our environment. +> So it must run all the operators and controllers needed for Metal³. -Below is shown the output of the script once all the container images have been pulled to minikube: +Below is shown the output of the script once all the container images +have been pulled to minikube: ```sh + sudo su -l -c 'minikube ssh sudo docker image ls' alosadag @@ -314,7 +458,9 @@ k8s.gcr.io/pause 3.1 da86e6ba6ca1 gcr.io/k8s-minikube/storage-provisioner v1.8.1 4689081edb10 2 years ago 80.8MB ``` -Once the container images are stored, minikube can be stopped. At that moment, the virtual networks shown in the previous picture are attached to the minikube VM as can be verified by the following command: +Once the container images are stored, minikube can be stopped. At that +moment, the virtual networks shown in the previous picture are attached +to the minikube VM as can be verified by the following command: ```sh [alosadag@smc-master metal3-dev-env]$ sudo virsh domiflist minikube @@ -329,27 +475,52 @@ Interface Type Source Model MAC > info "Information" > At this point the host is ready to create the virtual infrastructure. -The video below exhibits all the configurations explained and executed during this _first_ step. +The video below exhibits all the configurations explained and executed +during this _first_ step. + + + + ## **Step 2: Configure the host** -In this step, the script `02_configure_host.sh` basically configures the libvirt/KVM virtual infrastructure and starts services in the host that will be consumed by the virtual bare metal machines: +In this step, the script `02_configure_host.sh` basically configures the +libvirt/KVM virtual infrastructure and starts services in the host that +will be consumed by the virtual bare metal machines: -- `Web server` to expose the `ironic-python-agent` (IPA) initramfs, kernel, headers and operating system cloud images. +- `Web server` to expose the `ironic-python-agent` (IPA) initramfs, + kernel, headers and operating system cloud images. - `Virtual BMC` to emulate a real baseboard management controller (BMC). -- `Container registry` where the virtual servers will pull the images needed to run a K8s installation. +- `Container registry` where the virtual servers will pull the images + needed to run a K8s installation. > info "Information" -> A baseboard management controller (BMC) is a specialized service processor that monitors the physical state of a computer, network server or other hardware device using sensors and communicating with the system administrator through an independent connection. The BMC is part of the Intelligent Platform Management Interface (IPMI) and is usually contained in the motherboard or main circuit board of the device to be monitored. - -First, an ssh-key in charge of communicating to libvirt is created if it does not exist previously. This key is called `id_rsa_virt_power`. It is added to the root authorized_keys and is used by `vbmc` and `sushy tools` to contact libvirt. 
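Once the virtual BMCs are up later in this step, they can also be queried directly over IPMI. A hypothetical check, reusing the address, port and credentials that the `BareMetalHost` definitions use later in this post:

```sh
# Hypothetical IPMI query against the first virtual BMC exposed by vbmc;
# host, port, user and password match the BareMetalHost secrets shown later
ipmitool -I lanplus -H 192.168.111.1 -p 6230 -U admin -P password power status
```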
+> A baseboard management controller (BMC) is a specialized service +> processor that monitors the physical state of a computer, network +> server or other hardware device using sensors and communicating with +> the system administrator through an independent connection. The BMC is +> part of the Intelligent Platform Management Interface (IPMI) and is +> usually contained in the motherboard or main circuit board of the +> device to be monitored. + +First, an ssh-key in charge of communicating to libvirt is created if it +does not exist previously. This key is called `id_rsa_virt_power`. It is +added to the root authorized_keys and is used by `vbmc` and `sushy +tools` to contact libvirt. > info "Information" -> `sushy-tools` is a set of simple simulation tools aiming at supporting the development and testing of the Redfish protocol implementations. +> `sushy-tools` is a set of simple simulation tools aiming at supporting +> the development and testing of the Redfish protocol implementations. -Next, another Ansible playbook called [setup-playbook.yml](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/setup-playbook.yml) is run against the host. It is focused on setting up the virtual infrastructure around `metal3-dev-env`. Below it is shown the Ansible variables that are passed to the playbook, which actually are obtaining the values from the global variables defined in the [common.sh](#commonsh) or the configuration file. +Next, another Ansible playbook called +[setup-playbook.yml](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/setup-playbook.yml) +is run against the host. It is focused on setting up the virtual +infrastructure around `metal3-dev-env`. Below it is shown the Ansible +variables that are passed to the playbook, which actually are obtaining +the values from the global variables defined in the +[common.sh](#commonsh) or the configuration file. ```sh ANSIBLE_FORCE_COLOR=true ansible-playbook \ @@ -379,24 +550,43 @@ ANSIBLE_FORCE_COLOR=true ansible-playbook \ | PROVISIONING_URL_HOST | 172.22.0.1 | > info "Information" -> There are variables that are only defined as Ansible variables, e.g. number of CPUs of the virtual bare metal server, size of disks, etc. In case you would like to change properties not defined globally in the `metal3-dev-env` take a look at the default variables specified in role: [common](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/roles/common/defaults/main.yml) and [libvirt](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/roles/libvirt/defaults/main.yml) +> There are variables that are only defined as Ansible variables, e.g. +> number of CPUs of the virtual bare metal server, size of disks, etc. +> In case you would like to change properties not defined globally in +> the `metal3-dev-env` take a look at the default variables specified in +> role: +> [common](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/roles/common/defaults/main.yml) +> and +> [libvirt](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/roles/libvirt/defaults/main.yml) The `setup-playbook.yml` is composed by 3 roles, which are detailed below: > - **Common.** - -> This role sets up the virtual hardware and network configuration of the VMs. Actually it is a [dependency](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/roles/libvirt/meta/main.yml) of the `libvirt` and `virtbmc` Ansible roles. 
This means that the `common` role must always be executed before the roles that depend on them. Also, they are only executed once. If two roles state the same one as their dependency, it is only executed the first time. > -> > - +> This role sets up the virtual hardware and network configuration of +> the VMs. Actually it is a +> [dependency](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/roles/libvirt/meta/main.yml) +> of the `libvirt` and `virtbmc` Ansible roles. This means that the +> `common` role must always be executed before the roles that depend on +> them. Also, they are only executed once. If two roles state the same +> one as their dependency, it is only executed the first time. +> +> > - **Libvirt.** - -> It actually is the role that configures the virtual bare metal servers. They are all identically defined with the same hardware and network configuration. Note that they are not started since they will be booted later by ironic during the provisioning process. - +> +> It actually is the role that configures the virtual bare metal +> servers. They are all identically defined with the same hardware and +> network configuration. Note that they are not started since they will +> be booted later by ironic during the provisioning process. +> > note "Note" -> It is possible to change the number of VMs to provision by replacing the value of `NUMBER_NODES` - -> Finally, once the VMs are defined and we have their MAC address, the ironic inventory file `ironic_nodes_json` is created. The action of creating a node is part of the enrollment process and the first step to prepare a node to reach the `available` status. +> It is possible to change the number of VMs to provision by replacing +> the value of `NUMBER_NODES` +> +> Finally, once the VMs are defined and we have their MAC address, the +> ironic inventory file `ironic_nodes_json` is created. The action of +> creating a node is part of the enrollment process and the first step +> to prepare a node to reach the `available` status. ```json { @@ -452,19 +642,32 @@ The `setup-playbook.yml` is composed by 3 roles, which are detailed below: ``` > info "Information" -> This role is also used to tear down the virtual infrastructure depending on the variable [libvirt_action](https://github.com/metal3-io/metal3-dev-env/blob/2b5d8e76f33d143757d1f0b9b1e82dc662245b9c/vm-setup/roles/libvirt/defaults/main.yml#L2) inside the Ansible role: `setup or teardown`. - +> This role is also used to tear down the virtual infrastructure +> depending on the variable +> [libvirt_action](https://github.com/metal3-io/metal3-dev-env/blob/2b5d8e76f33d143757d1f0b9b1e82dc662245b9c/vm-setup/roles/libvirt/defaults/main.yml#L2) +> inside the Ansible role: `setup or teardown`. +> > - **VirtBMC** - -> This role is only executed if the bare metal virtual machines are created in libvirt, because `vbmc` needs libvirt to emulate a real BMC. - +> +> This role is only executed if the bare metal virtual machines are +> created in libvirt, because `vbmc` needs libvirt to emulate a real +> BMC. +> > info "Information" -> VirtualBMC (`vmbc`) tool simulates a Baseboard Management Controller (BMC) by exposing IPMI responder to the network and talking to libvirt at the host vBMC is running at. Basically, manipulate virtual machines which pretend to be bare metal servers. - -> The `virtbmc` Ansible role creates the `vbmc` and `sushy-tools` configuration in the host for each virtual bare metal nodes. 
Note that each virtual bare metal host will have a different `vbmc` socket exposed in the host. The communication to each `vbmc` is needed by the BMO to start, stop, configure the boot order, etc during the provisioning stage. Finally, this folders containing the configuration will be mounted by the `vbmc` and `sushy-tools` containers. +> VirtualBMC (`vmbc`) tool simulates a Baseboard Management Controller +> (BMC) by exposing IPMI responder to the network and talking to libvirt +> at the host vBMC is running at. Basically, manipulate virtual machines +> which pretend to be bare metal servers. +> +> The `virtbmc` Ansible role creates the `vbmc` and `sushy-tools` +> configuration in the host for each virtual bare metal nodes. Note that +> each virtual bare metal host will have a different `vbmc` socket +> exposed in the host. The communication to each `vbmc` is needed by the +> BMO to start, stop, configure the boot order, etc during the +> provisioning stage. Finally, this folders containing the configuration +> will be mounted by the `vbmc` and `sushy-tools` containers. +> > -> > - > ```sh > [alosadag@eko1 metal3-dev-env]$ sudo ls -l --color /opt/metal3-dev-env/virtualbmc > total 0 @@ -472,11 +675,10 @@ The `setup-playbook.yml` is composed by 3 roles, which are detailed below: > drwxr-x---. 4 root root 70 Feb 5 11:08 vbmc > ``` -```` - - -Next, both host provisioning and baremetal interfaces are configured. The provisioning interface, as the name suggests, will be used to provision the virtual bare metal hosts by means of the `Bare Metal Operator`. This interface is configured with an static IP (172.22.0.1): - +Next, both host provisioning and baremetal interfaces are configured. +The provisioning interface, as the name suggests, will be used to +provision the virtual bare metal hosts by means of the `Bare Metal +Operator`. This interface is configured with an static IP (172.22.0.1): ```sh [alosadag@smc-master metal3-dev-env]$ ifconfig provisioning @@ -486,7 +688,11 @@ provisioning: flags=4163 mtu 1500 ether 12:91:c1:a1:6a:0f txqueuelen 1000 (Ethernet) ```` -On the other hand, the baremetal virtual interface behaves as an external network. This interface is able to reach the internet and it is the network where the different Kubernetes nodes will exchange information. This interface is configured as auto, so the IP is retrieved by DHCP. +On the other hand, the baremetal virtual interface behaves as an +external network. This interface is able to reach the internet and it is +the network where the different Kubernetes nodes will exchange +information. This interface is configured as auto, so the IP is +retrieved by DHCP. ```sh [alosadag@smc-master metal3-dev-env]$ ifconfig baremetal @@ -495,7 +701,12 @@ baremetal: flags=4099 mtu 1500 ether 52:54:00:db:85:29 txqueuelen 1000 (Ethernet) ``` -Next, an Ansible role called [firewall](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/firewall.yml) will be executed targetting the host to be sure that the proper ports are opened. In case your host is running `Red Hat Enterprise Linux` or `CentOS 8`, firewall module will be used. In any other case, iptables module is the choice. +Next, an Ansible role called +[firewall](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/firewall.yml) +will be executed targetting the host to be sure that the proper ports +are opened. In case your host is running `Red Hat Enterprise Linux` or +`CentOS 8`, firewall module will be used. In any other case, iptables +module is the choice. 
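A quick, hypothetical way to confirm afterwards which backend actually ended up managing the rules on the host:

```sh
# Hypothetical check of the firewall backend in use on the host
if systemctl is-active --quiet firewalld; then
  sudo firewall-cmd --list-ports
else
  sudo iptables -S | head -n 20
fi
```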
Below is the code snippet where `firewalld` or `iptables` is assigned: @@ -511,16 +722,29 @@ fi ``` > note "Note" -> This behaviour can be changed by replacing the value of the `USE_FIREWALLD` variable +> This behaviour can be changed by replacing the value of the +> `USE_FIREWALLD` variable -The ports managed by this role are all associated with the services that take part in the provisioning process: `ironic`, `vbmc`, `httpd`, `pxe`, `container registry`.. +The ports managed by this role are all associated with the services that +take part in the provisioning process: `ironic`, `vbmc`, `httpd`, `pxe`, +`container registry`.. > note "Note" -> Services like ironic, pxe, keepalived, httpd and the container registry are running in the host as containers attached to the host network on the host's provisioning interface. On the other hand, the vbmc service is also running as a privileged container and it is listening in the host's baremetal interface. - -Once the network is configured, a local `container registry` is started. It will be needed in the case of using locally built images. In that case, the container images can be modified locally and pushed to the local registry. At that point, the specific image location variable must be changed so it must point out the local registry. This process makes it easy to verify and test changes to the code locally. - -At this point, the following containers are running inside two pods on the host: _infra-pod_ and _ironic-pod_. +> Services like ironic, pxe, keepalived, httpd and the container +> registry are running in the host as containers attached to the host +> network on the host's provisioning interface. On the other hand, the +> vbmc service is also running as a privileged container and it is +> listening in the host's baremetal interface. + +Once the network is configured, a local `container registry` is started. +It will be needed in the case of using locally built images. In that +case, the container images can be modified locally and pushed to the +local registry. At that point, the specific image location variable must +be changed so it must point out the local registry. This process makes +it easy to verify and test changes to the code locally. + +At this point, the following containers are running inside two pods on +the host: _infra-pod_ and _ironic-pod_. ```sh [root@eko1 metal3-dev-env]# podman pod list --ctr-names @@ -529,29 +753,31 @@ POD ID NAME STATUS CREATED CONTAINER INFO 5a0d475351aa ironic-pod Running 6 days ago [5a0d475351aa-infra] [ipa-downloader] 18f3a8f61407 ``` -Below are detailed the containers inside the _infra-pod_ pod which are running as privileged using the host network: +Below are detailed the containers inside the _infra-pod_ pod which are +running as privileged using the host network: > - **The httpd container.** > > -> A folder called _shared_ where the cloud OS image and IPA files are available is mounted and exposed to the virtual bare metal hosts. - -> ```sh +> A folder called _shared_ where the cloud OS image and IPA files are +> available is mounted and exposed to the virtual bare metal hosts. 
> +> ```sh +> sudo podman run -d --net host --privileged --name httpd-infra \ +> --pod infra-pod -v /opt/metal3-dev-env/ironic:/shared --entrypoint \ +> /bin/runhttpd quay.io/metal3-io/ironic > ``` - -- sudo podman run -d --net host --privileged --name httpd-infra --pod infra-pod -v /opt/metal3-dev-env/ironic:/shared --entrypoint /bin/runhttpd quay.io/metal3-io/ironic - -```` - - -> This folder also contains the `inspector.ipxe` file which contains the information needed to be able to run the `ironic-python-agent` kernel and initramfs. Below, httpd-infra container is accessed and it has been verified that host's `/opt/metal3-dev-env/ironic/` (`IRONIC_DATA_DIR`) is mounted inside the *shared* folder of the container: - +> +> This folder also contains the `inspector.ipxe` file which contains the +> information needed to be able to run the `ironic-python-agent` kernel +> and initramfs. Below, httpd-infra container is accessed and it has +> been verified that host's `/opt/metal3-dev-env/ironic/` +> (`IRONIC_DATA_DIR`) is mounted inside the _shared_ folder of the +> container: ```sh [alosadag@eko1 metal3-dev-env]$ sudo podman exec -it httpd-infra bash [root@infra-pod shared]# cat html/inspector.ipxe #!ipxe - :retry_boot echo In inspector.ipxe imgfree @@ -562,23 +788,33 @@ boot ```` > - **The vbmc container.** - -> This container mounts two host folders: one is `/opt/metal3-dev-env/virtualbmc/vbmc` where `vbmc` configuration for each node is stored, the other folder is `/root/.ssh` where root keys are located, specifically `id_rsa_virt_power` which is used to manage the communication with libvirt. - +> +> This container mounts two host folders: one is +> `/opt/metal3-dev-env/virtualbmc/vbmc` where `vbmc` configuration for +> each node is stored, the other folder is `/root/.ssh` where root keys +> are located, specifically `id_rsa_virt_power` which is used to manage +> the communication with libvirt. +> > ```sh > + sudo podman run -d --net host --privileged --name vbmc --pod infra-pod -v /opt/metal3-dev-env/virtualbmc/vbmc:/root/> .vbmc -v /root/.ssh:/root/ssh quay.io/metal3-io/vbmc > ``` > > - **The sushy-tools container.** > -> This container mounts the `/opt/metal3-dev-env/virtualbmc/sushy-tools config folder and the`/root/.ssh`local folder as well. The functionality is similar as the`vbmc`, however this use redfish instead of ipmi to connect to the BMC. - +> This container mounts the `/opt/metal3-dev-env/virtualbmc/sushy-tools +> config folder and the`/root/.ssh`local folder as well. The +> functionality is similar as the`vbmc`, however this use redfish +> instead of ipmi to connect to the BMC. +> > ```sh > + sudo podman run -d --net host --privileged --name sushy-tools --pod infra-pod -v /opt/metal3-dev-env/virtualbmc/> sushy-tools:/root/sushy -v /root/.ssh:/root/ssh quay.io/metal3-io/sushy-tools > ``` - +> > info "Information" -> At this point the virtual infrastructure must be ready to apply the Kubernetes specific configuration. Note that all the VMs specified by `NUMBER_NODES` and minikube must be shut down and the defined virtual network must be active: +> At this point the virtual infrastructure must be ready to apply the +> Kubernetes specific configuration. 
Note that all the VMs specified by +> `NUMBER_NODES` and minikube must be shut down and the defined virtual +> network must be active: ```sh [alosadag@smc-master metal3-dev-env]$ sudo virsh list --all @@ -599,17 +835,34 @@ boot provisioning active yes yes ``` -In the video below it is exhibited all the configuration explained and executed during this _second_ step. +In the video below it is exhibited all the configuration explained and +executed during this _second_ step. + + + + ## **Step 3: Launch the management cluster (minikube)** -The third script called `03_launch_mgmt_cluster.sh` basically configures minikube to become a Metal³ management cluster. On top of minikube the `baremetal-operator`, `capi-controller-manager`, `capbm-controller-manager` and `cabpk-controller-manager` are installed in the metal3 namespace. +The third script called `03_launch_mgmt_cluster.sh` basically configures +minikube to become a Metal³ management cluster. On top of minikube the +`baremetal-operator`, `capi-controller-manager`, +`capbm-controller-manager` and `cabpk-controller-manager` are installed +in the metal3 namespace. -In a more detailed way, the script clones the `Bare Metal Operator` ([BMO](https://github.com/metal3-io/baremetal-operator)) and `Cluster API Provider for Managed Bare Metal Hardware operator` ([CAPBM](https://github.com/metal3-io/cluster-api-provider-baremetal)) git repositories, creates the cloud.yaml file and starts the minikube virtual machine. Once minikube is up and running, the `BMO` is built and executed in minikube's Kubernetes cluster. +In a more detailed way, the script clones the `Bare Metal Operator` +([BMO](https://github.com/metal3-io/baremetal-operator)) and `Cluster +API Provider for Managed Bare Metal Hardware operator` +([CAPBM](https://github.com/metal3-io/cluster-api-provider-baremetal)) +git repositories, creates the cloud.yaml file and starts the minikube +virtual machine. Once minikube is up and running, the `BMO` is built and +executed in minikube's Kubernetes cluster. 
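Since those repositories and branches are plain variables, a fork or feature branch of the BMO can be tested by overriding them before re-running the scripts (or by persisting them in the per-user config file). A hypothetical sketch; the fork URL and branch name are placeholders only:

```sh
# Hypothetical overrides to build the Bare Metal Operator from a fork;
# BMOREPO, BMOBRANCH and FORCE_REPO_UPDATE are listed in the table below
export BMOREPO="https://github.com/<your-fork>/baremetal-operator.git"
export BMOBRANCH="my-feature-branch"
export FORCE_REPO_UPDATE=true
```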
-In the case of the `Bare Metal Operator`, the branch by default to clone is master, however, this and other variables shown in the following table can be replaced in the config file: +In the case of the `Bare Metal Operator`, the branch by default to clone +is master, however, this and other variables shown in the following +table can be replaced in the config file: ```sh + BMOREPO=https://github.com/metal3-io/baremetal-operator.git @@ -618,15 +871,17 @@ In the case of the `Bare Metal Operator`, the branch by default to clone is mast | **Name of the variable** | **Default value** | **Options** | | ------------------------ | --------------------------------------------------------------- | -------------------- | -| BMOREPO | https://github.com/metal3-io/baremetal-operator.git | -| BMOBRANCH | master | -| CAPBMREPO | https://github.com/metal3-io/cluster-api-provider-baremetal.git | +| BMOREPO | | | +| BMOBRANCH | master | | +| CAPBMREPO | | | | CAPM3_VERSION | v1alpha3 | v1alpha4 or v1alpha3 | -| FORCE_REPO_UPDATE | false | -| BMO_RUN_LOCAL | false | -| CAPBM_RUN_LOCAL | false | +| FORCE_REPO_UPDATE | false | | +| BMO_RUN_LOCAL | false | | +| CAPBM_RUN_LOCAL | false | | -Once the `BMO` variables are configured, it is time for the operator to be deployed using `kustomize` and `kubectl` as it can seen from the logs: +Once the `BMO` variables are configured, it is time for the operator to +be deployed using `kustomize` and `kubectl` as it can seen from the +logs: > **Information:** [Kustomize](https://github.com/kubernetes-sigs/kustomize) is a Kubernetes tool that lets you customize raw, template-free YAML files for multiple purposes, leaving the original YAML untouched and usable as is. @@ -643,7 +898,11 @@ secret/mariadb-password-d88m524c46 created deployment.apps/metal3-baremetal-operator created ``` -Once the `BMO` objects are applied, it's time to transform the virtual bare metal hosts information into a yaml file of kind `BareMetalHost` Custom Resource (CR). This is done by a golang script passing them the IPMI address, BMC username and password, which are stored as a Kubernetes secret, MAC address and name: +Once the `BMO` objects are applied, it's time to transform the virtual +bare metal hosts information into a yaml file of kind `BareMetalHost` +Custom Resource (CR). This is done by a golang script passing them the +IPMI address, BMC username and password, which are stored as a +Kubernetes secret, MAC address and name: ```sh + go run /home/alosadag/go/src/github.com/metal3-io/baremetal-operator/cmd/make-bm-worker/main.go -address ipmi://192.168.111.1:6230 -password password -user admin -boot-mac 00:be:bc:fd:17:f3 node-0 @@ -654,7 +913,9 @@ Once the `BMO` objects are applied, it's time to transform the virtual bare meta + read -r name address user password mac ``` -Below is shown the bare metal host definition of node-1. Note that the IPMI address is the IP of the host's provisioning interface. Behind the scenes, IPMI is handled by the `vbmc` container running in the host. +Below is shown the bare metal host definition of node-1. Note that the +IPMI address is the IP of the host's provisioning interface. Behind the +scenes, IPMI is handled by the `vbmc` container running in the host. 
```yaml --- @@ -679,7 +940,8 @@ spec: credentialsName: node-1-bmc-secret ``` -See that the MAC address configured in the `BareMetalHost` spec definition matches _node-1_ provisioning interface: +See that the MAC address configured in the `BareMetalHost` spec +definition matches _node-1_ provisioning interface: ```sh [root@eko1 metal3-dev-env]# virsh domiflist node_1 @@ -689,7 +951,8 @@ vnet4 bridge provisioning virtio 00:00:e0:4b:24:8f vnet5 bridge baremetal virtio 00:00:e0:4b:24:91 ``` -Finally, the script apply in namespace metal3 each of the `BareMetalHost` yaml files that match each virtual bare metal host: +Finally, the script apply in namespace metal3 each of the +`BareMetalHost` yaml files that match each virtual bare metal host: ```sh + kubectl apply -f bmhosts_crs.yaml -n metal3 @@ -701,10 +964,16 @@ secret/node-2-bmc-secret created baremetalhost.metal3.io/node-2 created ``` -Lastly, it is the turn of the `CAPBM`. Similar to `BMO`, `kustomize` is used to create the different Kubernetes components and `kubectl` applied the files into the management cluster. +Lastly, it is the turn of the `CAPBM`. Similar to `BMO`, `kustomize` is +used to create the different Kubernetes components and `kubectl` applied +the files into the management cluster. > warning "Warning" -> Note that installing `CAPBM` includes installing the components of the [Cluster API](https://github.com/kubernetes-sigs/cluster-api) and the components of the [Cluster API bootstrap provider kubeadm](https://github.com/kubernetes-sigs/cluster-api/tree/master/bootstrap/kubeadm) (CABPK) +> Note that installing `CAPBM` includes installing the components of the +> [Cluster API](https://github.com/kubernetes-sigs/cluster-api) and the +> components of the [Cluster API bootstrap provider +> kubeadm](https://github.com/kubernetes-sigs/cluster-api/tree/master/bootstrap/kubeadm) +> (CABPK) Below the objects are created through the `generate.sh` script: @@ -725,7 +994,8 @@ Generated /home/alosadag/go/src/github.com/metal3-io/cluster-api-provider-bareme Generated /home/alosadag/go/src/github.com/metal3-io/cluster-api-provider-baremetal/examples/_out/provider-components.yaml ``` -Then, `kustomize` configures the files accordingly to the values defined and `kubectl` applies them: +Then, `kustomize` configures the files accordingly to the values defined +and `kubectl` applies them: ```sh + kustomize build capbm-eJPOjCPASD @@ -768,23 +1038,37 @@ deployment.apps/capi-controller-manager created ``` > info "Information" -> At this point all controllers and operators must be running in the namespace metal3 of the management cluster (minikube). All virtual bare metal hosts configured must be shown as `BareMetalHosts` resources in the metal3 namespace as well. They should be in ready status and stopped (online is false) +> At this point all controllers and operators must be running in the +> namespace metal3 of the management cluster (minikube). All virtual +> bare metal hosts configured must be shown as `BareMetalHosts` +> resources in the metal3 namespace as well. They should be in ready +> status and stopped (online is false) In the video below it is exhibited all the configuration explained and executed during this _third_ step. 
+ + + + ## **Step 4: Verification** -The last script `04_verify.sh` is in charge of verifying that the deployment has been successful by checking several things: +The last script `04_verify.sh` is in charge of verifying that the +deployment has been successful by checking several things: -- Custom resources (CR) and custom resource definition (CRD) were applied and exist in the cluster. -- Verify that the virtual bare metal hosts matches the information detailed in the`BareMetalHost` object. +- Custom resources (CR) and custom resource definition (CRD) were + applied and exist in the cluster. +- Verify that the virtual bare metal hosts matches the information + detailed in the`BareMetalHost` object. - All containers are in running status. - Verify virtual network configuration and status. - Verify operators and controllers are running. -However, this verification can be easily achieved manually. For instance, checking that controllers and operators running in the management cluster (minikube) and all the virtual bare metal hosts are in ready status: +However, this verification can be easily achieved manually. For +instance, checking that controllers and operators running in the +management cluster (minikube) and all the virtual bare metal hosts are +in ready status: ```sh [alosadag@eko1 ~]$ kubectl get pods -n metal3 -o wide @@ -795,7 +1079,9 @@ capi-controller-manager-798c76675f-dxh2n 1/1 Running 10 6d23h metal3-baremetal-operator-5b4c59755d-h4zkp 6/6 Running 8 6d23h 192.168.39.101 minikube ``` -Verify that the `BareMetalHosts` provisioning status is `ready` and the BMC configuration is correct. Check that all virtual bare metal hosts are shut down (online is false): +Verify that the `BareMetalHosts` provisioning status is `ready` and the +BMC configuration is correct. Check that all virtual bare metal hosts +are shut down (online is false): ```sh [alosadag@eko1 ~]$ kubectl get baremetalhosts -n metal3 @@ -805,7 +1091,8 @@ node-1 OK ready ipmi://192.168.111. node-2 OK ready ipmi://192.168.111.1:6232 unknown false ``` -Get the list of CRDs created in the cluster. Check that, at least, the following ones exist: +Get the list of CRDs created in the cluster. Check that, at least, the +following ones exist: ```sh [alosadag@eko1 ~]$ kubectl get crds @@ -823,9 +1110,11 @@ machinesets.cluster.x-k8s.io 2020-01-22T13:19:43Z ``` > info "Information" -> `KUBECONFIG` file is stored in the user’s home directory (~/.kube/config) that executed the scripts. +> `KUBECONFIG` file is stored in the user’s home directory +> (~/.kube/config) that executed the scripts. -Check the status of all the applications running in minikube or better said, in the management cluster. +Check the status of all the applications running in minikube or better +said, in the management cluster. ```sh [alosadag@smc-master logs]$ kubectl get pods -A @@ -845,17 +1134,32 @@ metal3 capi-controller-manager-84947c7497-k6twl 1/1 Running 0 metal3 metal3-baremetal-operator-78bffc8d-z5hqs 6/6 Running 0 156m ``` -In the video below it is exhibited all the configuration explained and executed during the _verification_ steps. +In the video below it is exhibited all the configuration explained and +executed during the _verification_ steps. - + -## **Summary** + -In this post a deep dive into the `metal3-dev-env` scripts was shown. It has been deeply detailed the process of creating a Metal³ **emulated environment** from a set of virtual machines (VMs) to manage as if they were bare metal hosts. 
+ -After this post, the reader should have acquired a basic understanding of all the pieces involved in the Metal³ project. Also, and more important, how these scripts can be adapted to your specific needs. Remember that this can be achieved in multiple ways: replacing values in the global variables, replacing Ansible default variables or even modifying playbooks or the scripts themselves. +## **Summary** -Notice that the Metal³ development environment also focuses on developing new features of the BMO or CAPBM and being able to test them locally. +In this post a deep dive into the `metal3-dev-env` scripts was shown. It +has been deeply detailed the process of creating a Metal³ **emulated +environment** from a set of virtual machines (VMs) to manage as if they +were bare metal hosts. + +After this post, the reader should have acquired a basic understanding +of all the pieces involved in the Metal³ project. Also, and more +important, how these scripts can be adapted to your specific needs. +Remember that this can be achieved in multiple ways: replacing values in +the global variables, replacing Ansible default variables or even +modifying playbooks or the scripts themselves. + +Notice that the Metal³ development environment also focuses on +developing new features of the BMO or CAPBM and being able to test them +locally. ## **References** diff --git a/_posts/2020-02-27-talk-kubernetes-finland-metal3.md b/_posts/2020-02-27-talk-kubernetes-finland-metal3.md index 3c2220206..301f77a41 100644 --- a/_posts/2020-02-27-talk-kubernetes-finland-metal3.md +++ b/_posts/2020-02-27-talk-kubernetes-finland-metal3.md @@ -15,13 +15,15 @@ In this presentation, Maël starts giving a short introduction of the [Cluster A > info "Information" > The video recording from the “Kubernetes and CNCF Finland Meetup” is composed of three talks. The video embedded starts with Maël's talk. - +> > warning "Warning" > Playback of the video has been disabled by the author. Click on the play button and then on the "Watch this video on Youtube" link once it appears. + + -
+ During the first part of the presentation, a detailed explanation of the different Kubernetes Custom Resource Definitions (CRDs) inside Metal³ is shown as also how they are linked with the Cluster API project. As an example, the image below shows the interaction between objects and controllers from both projects: @@ -31,12 +33,10 @@ Once finished the introductory part, Maël focuses on the main components of the ![metal3 introspection](/assets/2020-02-27-talk-kubernetes-finland-metal3/metal3-instrospection.resized.png) -
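If you would like to poke at the result of this inspection step yourself, the hardware details that Ironic discovers end up in the `BareMetalHost` status. A small sketch of how to read them (the host name and namespace are only examples, matching a metal3-dev-env style setup):

```console
# The full inspected inventory (CPU, RAM, NICs, disks) lives under status.hardware
kubectl get baremetalhost node-0 -n metal3 -o yaml

# Or pick out a single field, for example the amount of RAM in MiB
kubectl get baremetalhost node-0 -n metal3 -o jsonpath='{.status.hardware.ramMebibytes}'
```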
The second part of the process is the **provisioning**. In this step, Maël explains how the Bare Metal Operator (BMO), together with Ironic, is in charge of presenting the Operating System image to the physical server and completing its installation.

![metal3 provisioning](/assets/2020-02-27-talk-kubernetes-finland-metal3/metal3-provisioning.resized.png)

-
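To follow that hand-off from the API side on your own cluster, the current provisioning state and the image being written are both visible on the `BareMetalHost` resources. A small sketch (the column names are arbitrary, the namespace matches the metal3-dev-env default):

```console
kubectl get baremetalhosts -n metal3 \
  -o custom-columns=NAME:.metadata.name,STATE:.status.provisioning.state,IMAGE:.spec.image.url
```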
Next, Maël deeply explains each Custom Resource (CR) used during the provisioning of target Kubernetes clusters in bare metal servers. He refers to objects such as `Cluster`, `BareMetalCluster`, `Machine`, `BareMetalMachine`, `BareMetalHost` and so on. Each one is clarified with a YAML file definition of a real case and a workflow diagram that shows the reconciliation procedure. The last part of the talk is dedicated to executing a demo where Maël creates a _target Kubernetes cluster_ from a running minikube VM (also called _bootstrap cluster_) where Metal³ is deployed. As it is pointed out in the video, the demo is running in _emulated hardware_. Actually, something similar to the [metal3-dev-env](https://github.com/metal3-io/metal3-dev-env) project can be used to reproduce the demo. More information on the Metal³ development environment (metal3-dev-env) can be found in the [Metal³ try-it section](https://metal3.io/try-it.html). In case you want to go deeper, take a look at the blog post [A detailed walkthrough of the Metal³ development environment]({%post_url 2020-02-18-metal3-dev-env-install-deep-dive %}). diff --git a/_posts/2020-06-18-Metal3-dev-env-BareMetal-Cluster-Deployment.md b/_posts/2020-06-18-Metal3-dev-env-BareMetal-Cluster-Deployment.md index bdabbba59..a0c82daba 100644 --- a/_posts/2020-06-18-Metal3-dev-env-BareMetal-Cluster-Deployment.md +++ b/_posts/2020-06-18-Metal3-dev-env-BareMetal-Cluster-Deployment.md @@ -7,35 +7,58 @@ author: Himanshu Roy ## Introduction -This blog post describes how to deploy a bare metal cluster, a virtual one for simplicity, using [Metal³/metal3-dev-env](https://github.com/metal3-io/metal3-dev-env). We will briefly discuss the steps involved in setting up the cluster as well as some of the customization available. If you want to know more about the architecture of Metal³, this [blogpost]({%post_url 2020-02-27-talk-kubernetes-finland-metal3 %}) can be helpful. - -This post builds upon the [detailed metal3-dev-env walkthrough blogpost]({%post_url 2020-02-18-metal3-dev-env-install-deep-dive %}) which describes in detail the steps involved in the environment set-up and management cluster configuration. Here we will use that environment to deploy a new Kubernetes cluster using Metal³. - -Before we get started, there are a couple of requirements we are expecting to be fulfilled. +This blog post describes how to deploy a bare metal cluster, a virtual +one for simplicity, using +[Metal³/metal3-dev-env](https://github.com/metal3-io/metal3-dev-env). We +will briefly discuss the steps involved in setting up the cluster as +well as some of the customization available. If you want to know more +about the architecture of Metal³, this [blogpost]({%post_url +2020-02-27-talk-kubernetes-finland-metal3 %}) can be helpful. + +This post builds upon the [detailed metal3-dev-env walkthrough +blogpost]({%post_url 2020-02-18-metal3-dev-env-install-deep-dive %}) +which describes in detail the steps involved in the environment set-up +and management cluster configuration. Here we will use that environment +to deploy a new Kubernetes cluster using Metal³. + +Before we get started, there are a couple of requirements we are +expecting to be fulfilled. ## Requirements -- Metal³ is already deployed and working, if not please follow the instructions in the previously mentioned [detailed metal3-dev-env walkthrough blogpost]({%post_url 2020-02-18-metal3-dev-env-install-deep-dive %}). 
-- The appropriate environment variables are setup via shell or in the `config_${user}.sh` file, for example - - - CAPM3_VERSION - - NUM_NODES - - CLUSTER_NAME +- Metal³ is already deployed and working, if not please follow the + instructions in the previously mentioned [detailed metal3-dev-env + walkthrough blogpost]({%post_url + 2020-02-18-metal3-dev-env-install-deep-dive %}). +- The appropriate environment variables are setup via shell or in the + `config_${user}.sh` file, for example + - CAPM3_VERSION + - NUM_NODES + - CLUSTER_NAME ## Overview of Config and Resource types -In this section, we give a brief overview of the important config files and resources used as part of the bare metal cluster deployment. -The following sub-sections show the config files and resources that are created and give a brief description of some of them. This will help you understand the technical details of the cluster deployment. You can also choose to skip this section, visit the next section about _provisioning_ first and then revisit this. +In this section, we give a brief overview of the important config files +and resources used as part of the bare metal cluster deployment. The +following sub-sections show the config files and resources that are +created and give a brief description of some of them. This will help you +understand the technical details of the cluster deployment. You can also +choose to skip this section, visit the next section about _provisioning_ +first and then revisit this. ### Config Files and Resources Types !["The directory tree for the ansible role used for deployment"](/assets/2020-06-18-Metal3-dev-env-BareMetal-Cluster-Deployment/manifest-directory.png) -> info "Information" -> Among these the config files are rendered under the path `https://github.com/metal3-io/metal3-dev-env/tree/master/vm-setup/roles/v1aX_integration_test/files` as part of the provisioning process. +> info "Information" Among these the config files are rendered under the +> path +> `https://github.com/metal3-io/metal3-dev-env/tree/master/vm-setup/roles/v1aX_integration_test/files` +> as part of the provisioning process. -
+A description of some of the files part of provisioning a cluster, in a +centos-based environment: -A description of some of the files part of provisioning a cluster, in a centos-based environment : + | Name | Description | Path | | ----------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | @@ -46,8 +69,6 @@ A description of some of the files part of provisioning a cluster, in a centos-b | [generate templates](https://github.com/metal3-io/metal3-dev-env/tree/master/vm-setup/roles/v1aX_integration_test/tasks/generate_templates.yml) | Renders cluster, control plane and worker definitions in the `Manifest` directory | `${metal3-dev-env}/vm-setup/roles/v1aX_integration_test/tasks/generate_templates.yml` | | [main vars file](https://github.com/metal3-io/metal3-dev-env/tree/master/vm-setup/roles/v1aX_integration_test/vars/main.yml) | Variable file that assigns all the defaults used during deployment | `${metal3-dev-env}/vm-setup/roles/v1aX_integration_test/vars/main.yml` | -
- Here are some of the resources that are created as part of provisioning : | Name | Description | @@ -62,14 +83,19 @@ Here are some of the resources that are created as part of provisioning : | Metal3MachineTemplate | Metal3 resource which acts as a template when creating a control plane or a worker node | | KubeadmConfigTemplate | A template of `KubeadmConfig`, for Workers, used to generate KubeadmConfig when a new worker node is provisioned | -> note "Note" -> The corresponding `KubeadmConfig` is copied to the control plane/worker at the time of provisioning. + -
+> note "Note" +> The corresponding `KubeadmConfig` is copied to the control +> plane/worker at the time of provisioning. ## Bare Metal Cluster Deployment -The deployment scripts primarily use ansible and the existing Kubernetes management cluster (based on minikube ) for deploying the bare-metal cluster. Make sure that some of the environment variables used for Metal³ deployment are set, if you didn't use `config_${user}.sh` for setting the environment variables. +The deployment scripts primarily use ansible and the existing Kubernetes +management cluster (based on minikube ) for deploying the bare-metal +cluster. Make sure that some of the environment variables used for +Metal³ deployment are set, if you didn't use `config_${user}.sh` for +setting the environment variables. | Parameter | Description | Default | | ------------- | -------------------------- | -------------- | @@ -77,47 +103,70 @@ The deployment scripts primarily use ansible and the existing Kubernetes managem | POD_CIDR | Pod Network CIDR | 192.168.0.0/18 | | CLUSTER_NAME | Name of bare metal cluster | test1 | -
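If you prefer exporting these by hand instead of keeping them in `config_${user}.sh`, something like the following is enough before running the scripts (the first two values are simply the defaults from the table above, `NUM_NODES` is just an example):

```console
export CLUSTER_NAME="test1"          # default shown above
export POD_CIDR="192.168.0.0/18"     # default shown above
export NUM_NODES=4                   # example value, size it to what your host can handle
```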
+=== ### Steps Involved -All the scripts for cluster provisioning or de-provisioning are located at - [`${metal3-dev-env}/scripts/`](https://github.com/metal3-io/metal3-dev-env/tree/master/scripts). The scripts call a common playbook which handles all the tasks that are available. - -The steps involved in the process are : - -- The script calls an ansible playbook with necessary parameters ( from env variables and defaults ) -- The playbook executes the role -, [`${metal3-dev-env}/vm-setup/roles/v1aX_integration_test`](https://github.com/metal3-io/metal3-dev-env/tree/master/vm-setup/roles/v1aX_integration_test), which runs the main [task_file](https://github.com/metal3-io/metal3-dev-env/tree/master/vm-setup/roles/v1aX_integration_test/tasks/main.yml) for provisioning/deprovisioning the cluster, control plane or a worker -- There are [templates](https://github.com/metal3-io/metal3-dev-env/tree/master/vm-setup/roles/v1aX_integration_test/templates) in the role, which are used to render configurations in the `Manifest` directory. These configurations use kubeadm and are supplied to the Kubernetes module of ansible to create the cluster. -- During provisioning, first the `clusterctl` env file is generated, then the cluster, control plane and worker definition templates for `clusterctl` are generated at `${HOME}/.cluster-api/overrides/infrastructure-metal3/${CAPM3RELEASE}`. -- Using the templates generated in the previous step, the definitions for resources related to cluster, control plane and worker are rendered using `clusterctl`. -- Centos or Ubuntu image [is downloaded](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/roles/v1aX_integration_test/tasks/download_image.yml) in the next step. -- Finally using the above definitions, which are passed to the `K8s` module in ansible, the corresponding resource( cluster/control plane/worker ) is provisioned. -- These same definitions are reused at the time of de-provisioning the corresponding resource, again using the `K8s` module in ansible - > note "Note" - > The manifest directory is created when provisioning is triggered for the first time and is subsequently used to store the config files that are rendered for deploying the bare metal cluster. - -
-
- -!["An Overview of various resources generated while provisioning and their relationship amongst themselves"](/assets/2020-06-18-Metal3-dev-env-BareMetal-Cluster-Deployment/metal3-bmetal-arch-overview.png) -
-
+All the scripts for cluster provisioning or de-provisioning are located +at - +[`${metal3-dev-env}/scripts/`](https://github.com/metal3-io/metal3-dev-env/tree/master/scripts). +The scripts call a common playbook which handles all the tasks that are +available. + +The steps involved in the process are: + +- The script calls an ansible playbook with necessary parameters ( from + env variables and defaults ) +- The playbook executes the role -, + [`${metal3-dev-env}/vm-setup/roles/v1aX_integration_test`](https://github.com/metal3-io/metal3-dev-env/tree/master/vm-setup/roles/v1aX_integration_test), + which runs the main + [task_file](https://github.com/metal3-io/metal3-dev-env/tree/master/vm-setup/roles/v1aX_integration_test/tasks/main.yml) + for provisioning/deprovisioning the cluster, control plane or a worker +- There are + [templates](https://github.com/metal3-io/metal3-dev-env/tree/master/vm-setup/roles/v1aX_integration_test/templates) + in the role, which are used to render configurations in the `Manifest` + directory. These configurations use kubeadm and are supplied to the + Kubernetes module of ansible to create the cluster. +- During provisioning, first the `clusterctl` env file is generated, + then the cluster, control plane and worker definition templates for + `clusterctl` are generated at + `${HOME}/.cluster-api/overrides/infrastructure-metal3/${CAPM3RELEASE}`. +- Using the templates generated in the previous step, the definitions + for resources related to cluster, control plane and worker are + rendered using `clusterctl`. +- Centos or Ubuntu image [is + downloaded](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/roles/v1aX_integration_test/tasks/download_image.yml) + in the next step. +- Finally using the above definitions, which are passed to the `K8s` + module in ansible, the corresponding resource( cluster/control + plane/worker ) is provisioned. +- These same definitions are reused at the time of de-provisioning the + corresponding resource, again using the `K8s` module in ansible + > note "Note" The manifest directory is created when provisioning is + > triggered for the first time and is subsequently used to store the + > config files that are rendered for deploying the bare metal cluster. + +!["An Overview of various resources generated while provisioning and +their relationship amongst +themselves"](/assets/2020-06-18-Metal3-dev-env-BareMetal-Cluster-Deployment/metal3-bmetal-arch-overview.png) ### Provision Cluster -This script, located at the path - `${metal3-dev-env}/scripts/provision/cluster.sh`, provisions the cluster by creating a `Metal3Cluster` and a `Cluster` resource. +This script, located at the path - +`${metal3-dev-env}/scripts/provision/cluster.sh`, provisions the cluster +by creating a `Metal3Cluster` and a `Cluster` resource. -
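Running it is a single command; the `${metal3-dev-env-path}` placeholder below follows the same convention used for the de-provisioning scripts later in this post:

```console
sh ${metal3-dev-env-path}/scripts/provision/cluster.sh
```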
-To see if you have a successful Cluster resource creation( the cluster still doesn't have a control plane or workers ), just do : +To see if you have a successful Cluster resource creation( the cluster +still doesn't have a control plane or workers ), just do: ```console kubectl get Metal3Cluster ${CLUSTER_NAME} -n metal3 ``` -> This will return the cluster deployed, and you can check the cluster details by describing the returned resource. +> This will return the cluster deployed, and you can check the cluster +> details by describing the returned resource. -
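For the infrastructure side of the pair, the same kind of check works against the `Metal3Cluster` object created by the script:

```console
kubectl describe Metal3Cluster ${CLUSTER_NAME} -n metal3
```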
-Here is what a `Cluster` resource looks like : +Here is what a `Cluster` resource looks like: ```console kubectl describe Cluster ${CLUSTER_NAME} -n metal3 @@ -131,10 +180,10 @@ spec: clusterNetwork: pods: cidrBlocks: - - 192.168.0.0/18 + - 192.168.0.0/18 services: cidrBlocks: - - 10.96.0.0/12 + - 10.96.0.0/12 controlPlaneEndpoint: host: 192.168.111.249 port: 6443 @@ -153,15 +202,20 @@ status: phase: Provisioned ``` -
-
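A one-liner that just pulls out that phase field can be handy when scripting or waiting in a loop (a sketch, using the same name and namespace as above):

```console
kubectl get cluster ${CLUSTER_NAME} -n metal3 -o jsonpath='{.status.phase}'
```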
- ### Provision Controlplane -This script, located at the path - `${metal3-dev-env}/scripts/provision/controlplane.sh`, provisions the control plane member of the cluster using the rendered definition of the control plane explained in the **Steps Involved** section. The `KubeadmControlPlane` creates a `Machine` which picks up a BareMetalHost satisfying its requirements as the control plane node, and it is then provisioned by the Bare Metal Operator. A `Metal3MachineTemplate` resource is also created as part of the provisioning process. +This script, located at the path - +`${metal3-dev-env}/scripts/provision/controlplane.sh`, provisions the +control plane member of the cluster using the rendered definition of the +control plane explained in the **Steps Involved** section. The +`KubeadmControlPlane` creates a `Machine` which picks up a BareMetalHost +satisfying its requirements as the control plane node, and it is then +provisioned by the Bare Metal Operator. A `Metal3MachineTemplate` +resource is also created as part of the provisioning process. > note "Note" -> It takes some time for the provisioning of the control plane, you can watch the process using some steps shared a bit later +> It takes some time for the provisioning of the control plane, you can +> watch the process using some steps shared a bit later ```console kubectl get KubeadmControlPlane ${CLUSTER_NAME} -n metal3 @@ -206,55 +260,71 @@ status: kubectl get Metal3MachineTemplate ${CLUSTER_NAME}-controlplane -n metal3 ``` -
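It can also be interesting to see which of the `BareMetalHosts` was actually claimed for the control plane. The consumer reference on each host shows that (a sketch; the column names are arbitrary):

```console
kubectl get baremetalhosts -n metal3 \
  -o custom-columns=NAME:.metadata.name,CONSUMER:.spec.consumerRef.name
```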
- To track the progress of provisioning, you can try the following: ```console kubectl get BareMetalHosts -n metal3 -w ``` -> The `BareMetalHosts` resource is created when `Metal³/metal3-dev-env` was deployed. It is a kubernetes resource that represents a bare metal Machine, with all its details and configuration, and is managed by the `Bare Metal Operator`. You can also use the short representation instead, i.e. `bmh` ( short for `BareMetalHosts`) in the command above. - -> You should see all the nodes that were created at the time of metal3 deployment, along with their current status as the provisioning progresses +> The `BareMetalHosts` resource is created when `Metal³/metal3-dev-env` +> was deployed. It is a kubernetes resource that represents a bare metal +> Machine, with all its details and configuration, and is managed by the +> `Bare Metal Operator`. You can also use the short representation +> instead, i.e. `bmh` ( short for `BareMetalHosts`) in the command +> above. +> You should see all the nodes that were created at the time of metal3 +> deployment, along with their current status as the provisioning +> progresses > note "Note" -> All the bare metal hosts listed above were created when Metal³ was deployed in the _detailed metal3-dev-env walkthrough blogpost_. +> All the bare metal hosts listed above were created when Metal³ was +> deployed in the _detailed metal3-dev-env walkthrough blogpost_. ```console kubectl get Machine -n metal3 -w ``` -> This shows the status of the Machine associated with the control plane and we can watch the status of provisioning under PHASE +> This shows the status of the Machine associated with the control plane +> and we can watch the status of provisioning under PHASE -
-Once the provisioning is finished, let's get the host-ip : +Once the provisioning is finished, let's get the host-ip: ```console sudo virsh net-dhcp-leases baremetal ``` > info "Information" -> `baremetal` is one of the 2 networks that were created at the time of Metal3 deployment, the other being “provisioning” which is used - as you have guessed - for provisioning the bare metal cluster. More details about networking setup in the metal3-dev-env environment are described in the - [detailed metal3-dev-env walkthrough blogpost]({%post_url 2020-02-18-metal3-dev-env-install-deep-dive %}). +> `baremetal` is one of the 2 networks that were created at the time of +> Metal3 deployment, the other being “provisioning” which is used - as +> you have guessed - for provisioning the bare metal cluster. More +> details about networking setup in the metal3-dev-env environment are +> described in the - [detailed metal3-dev-env walkthrough +> blogpost]({%post_url 2020-02-18-metal3-dev-env-install-deep-dive %}). -
-You can log in to the control plane node if you want, and can check the deployment status using two methods. +You can log in to the control plane node if you want, and can check the +deployment status using two methods. ```console ssh metal3@{control-plane-node-ip} ssh metal3@192.168.111.249 ``` -
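Once logged in, a quick way to confirm the control plane actually came up is to query the API server through the admin kubeconfig that kubeadm leaves on the node (the standard kubeadm path is assumed here):

```console
sudo kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes
```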
-
- ### Provision Workers -The script is located at `${metal3-dev-env-path}/scripts/provision/worker.sh` and it provisions a node to be added as a worker to the bare metal cluster. It selects one of the remaining nodes and provisions it and adds it to the bare metal cluster ( which only has a control plane node at this point ). The resources created for workers are - `MachineDeployment` which can be scaled up to add more workers to the cluster and `MachineSet` which then creates a `Machine` managing the node. +The script is located at +`${metal3-dev-env-path}/scripts/provision/worker.sh` and it provisions a +node to be added as a worker to the bare metal cluster. It selects one +of the remaining nodes and provisions it and adds it to the bare metal +cluster ( which only has a control plane node at this point ). The +resources created for workers are - `MachineDeployment` which can be +scaled up to add more workers to the cluster and `MachineSet` which then +creates a `Machine` managing the node. > info "Information" -> Similar to control plane provisioning, worker provisioning also takes some time, and you can watch the process using steps shared a bit later. This will also apply when you scale Up/Down workers at a later point in time. +> Similar to control plane provisioning, worker provisioning also takes +> some time, and you can watch the process using steps shared a bit +> later. This will also apply when you scale Up/Down workers at a later +> point in time. -
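The invocation follows the same pattern as the cluster and control plane scripts:

```console
sh ${metal3-dev-env-path}/scripts/provision/worker.sh
```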
This is what a `MachineDeployment` looks like ```console @@ -315,20 +385,22 @@ status: updatedReplicas: 1 ``` -
-To check the status we can follow steps similar to Controlplane case : +To check the status we can follow steps similar to Controlplane case: ```console kubectl get bmh -n metal3 -w ``` -> We can see the live status of the node being provisioned. As mentioned before `bmh` is the short representation of `BareMetalHosts`. +> We can see the live status of the node being provisioned. As mentioned +> before `bmh` is the short representation of `BareMetalHosts`. ```console kubectl get Machine -n metal3 -w ``` -> This shows the status of Machines associated with workers, apart from the one for Controlplane, and we can watch the status of provisioning under PHASE +> This shows the status of Machines associated with workers, apart from +> the one for Controlplane, and we can watch the status of provisioning +> under PHASE ```console sudo virsh net-dhcp-leases baremetal @@ -353,18 +425,24 @@ ssh metal3@{node-ip} kubectl scale --replicas=3 MachineDeployment ${CLUSTER_NAME} -n metal3 ``` -> We can add or remove workers to the cluster, and we can scale up the MachineDeployment up or down, in this example we are adding 2 more worker nodes, making the total nodes = 3 - -
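Behind the scenes the `MachineDeployment` manages a `MachineSet`, so another way to watch the scale-up converge is to look at its replica counts (a sketch):

```console
kubectl get machinesets -n metal3
```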
-
+> We can add or remove workers to the cluster, and we can scale up the +> MachineDeployment up or down, in this example we are adding 2 more +> worker nodes, making the total nodes = 3 ### Deprovisioning -All of the previous components have corresponding de-provisioning scripts which use config files, in the previously mentioned manifest directory, and use them to clean up the worker, control plane and cluster. +All of the previous components have corresponding de-provisioning +scripts which use config files, in the previously mentioned manifest +directory, and use them to clean up the worker, control plane and +cluster. -This step will use the already generated cluster/control plane/worker definition file, and supply it to **Kubernetes** ansible module to remove/de-provision the resource. You can find it, under the `Manifest` directory, in the Snapshot shared at the beginning of this blogpost where we show the file structure. +This step will use the already generated cluster/control plane/worker +definition file, and supply it to **Kubernetes** ansible module to +remove/de-provision the resource. You can find it, under the `Manifest` +directory, in the Snapshot shared at the beginning of this blogpost +where we show the file structure. -For example, if you wish to de-provision the cluster, you would do : +For example, if you wish to de-provision the cluster, you would do: ```console sh ${metal3-dev-env-path}/scripts/deprovision/worker.sh @@ -373,25 +451,49 @@ sh ${metal3-dev-env-path}/scripts/deprovision/cluster.sh ``` > note "Note" -> The reason for running the `deprovision/worker.sh` and `deprovision/controlplane.sh` scripts is that not all objects are cleared when we just run the `deprovision/cluster.sh` script. Following this, if you want to de-provision the control plane it is recommended to de-provision the cluster itself since we can't provision a new control plane with the same cluster. For worker de-provisioning, we only need to run the worker script. +> The reason for running the `deprovision/worker.sh` and +> `deprovision/controlplane.sh` scripts is that not all objects are +> cleared when we just run the `deprovision/cluster.sh` script. +> Following this, if you want to de-provision the control plane it is +> recommended to de-provision the cluster itself since we can't +> provision a new control plane with the same cluster. For worker +> de-provisioning, we only need to run the worker script. -The following video demonstrates all the steps to provision and de-provision a Kubernetes cluster explained above. +The following video demonstrates all the steps to provision and +de-provision a Kubernetes cluster explained above. + + + + ## Summary -In this blogpost we saw how to deploy a bare metal cluster once we have a Metal³(metal3-dev-env repo) deployed and by that point we will already have the nodes ready to be used for a bare metal cluster deployment. +In this blogpost we saw how to deploy a bare metal cluster once we have +a Metal³(metal3-dev-env repo) deployed and by that point we will already +have the nodes ready to be used for a bare metal cluster deployment. -In the first section, we show the various configuration files, templates, resource types and their meanings. Then we see the common steps involved in the provisioning process. After that, we see a general overview of how all resources are related and at what point are they created - provision cluster/control plane/worker. 
+In the first section, we show the various configuration files, +templates, resource types and their meanings. Then we see the common +steps involved in the provisioning process. After that, we see a general +overview of how all resources are related and at what point are they +created - provision cluster/control plane/worker. -In each of the provisioning sections, we see the steps to monitor the provisioning and how to confirm if it's successful or not, with brief explanations wherever required. Finally, we see the de-provisioning section which uses the resource definitions generated at the time of provisioning to de-provision cluster, control plane or worker. +In each of the provisioning sections, we see the steps to monitor the +provisioning and how to confirm if it's successful or not, with brief +explanations wherever required. Finally, we see the de-provisioning +section which uses the resource definitions generated at the time of +provisioning to de-provision cluster, control plane or worker. -Here are a few resources which you might find useful if you want to explore further, some of them have already been shared earlier. +Here are a few resources which you might find useful if you want to +explore further, some of them have already been shared earlier. - [Metal3-Documentation](https://metal3.io/) - - [Metal3-Try-it](https://metal3.io/try-it.html) + - [Metal3-Try-it](https://metal3.io/try-it.html) - [Metal³/metal3-dev-env](https://github.com/metal3-io/metal3-dev-env) -- [Detailed metal3-dev-env walkthrough blogpost]({%post_url 2020-02-18-metal3-dev-env-install-deep-dive %}) -- [Kubernetes Metal3 Talk]({%post_url 2020-02-27-talk-kubernetes-finland-metal3 %}) +- [Detailed metal3-dev-env walkthrough blogpost]({%post_url + 2020-02-18-metal3-dev-env-install-deep-dive %}) +- [Kubernetes Metal3 Talk]({%post_url + 2020-02-27-talk-kubernetes-finland-metal3 %}) - [Metal3-Docs-github](https://github.com/metal3-io/metal3-docs) diff --git a/_posts/2020-07-05-raw-image-streaming.md b/_posts/2020-07-05-raw-image-streaming.md index 52376d42b..bc6b089f3 100644 --- a/_posts/2020-07-05-raw-image-streaming.md +++ b/_posts/2020-07-05-raw-image-streaming.md @@ -47,7 +47,8 @@ download the image in memory. However, with raw images, the only constraint on memory is to run IPA (so 4GB). For example, in order to deploy an Ubuntu image (around 700MB, QCOW2), the requirement is 8GB when in QCOW2 format, while it is only 4GB (as for any other image) when streamed as raw. This allows -the deployment of images that are bigger than the available memory on constrained nodes. +the deployment of images that are bigger than the available memory on +constrained nodes. However, this shifts the load on the network, since the raw images are usually much bigger than other formats. Using this feature in network constrained diff --git a/_posts/2021-05-05-Pivoting.md b/_posts/2021-05-05-Pivoting.md index 0db2439eb..0a37536e5 100644 --- a/_posts/2021-05-05-Pivoting.md +++ b/_posts/2021-05-05-Pivoting.md @@ -52,11 +52,11 @@ CI deployment workflow: within it. The corresponding metal3-dev-env command is `make` - `provision` target cluster. For normal integration tests, this step deploys a control-plane node and a worker in the target cluster. For, `feature-test` - and `feature-test-upgrade` the provision step deploys three control-planes and a - worker. The corresponding metal3-dev-env commands are (normal integration test - workflow): + and `feature-test-upgrade` the provision step deploys three control-planes and + a worker. 
The corresponding metal3-dev-env commands are (normal integration + test workflow): -``` +```shell ./scripts/provision/cluster.sh ./scripts/provision/controlplane.sh ./scripts/provision/worker.sh @@ -74,7 +74,7 @@ CI deployment workflow: verifies and finalizes the pivoting process. The corresponding metal3-dev-env the command that performs this and the previous two steps is : -``` +```shell ./scripts/feature_tests/pivoting/pivot.sh ``` @@ -85,15 +85,15 @@ CI deployment workflow: installing them again is not necessary. The corresponding metal3-dev-env command that performs this step is : -``` +```shell ./scripts/feature_tests/pivoting/repivot.sh ``` - `De-provision` the BMHs and delete the target cluster. The corresponding - metal3-dev-env commands to de-provision worker, controlplane and the cluster is as - follows: + metal3-dev-env commands to de-provision worker, controlplane and the cluster + is as follows: -``` +```shell ./scripts/deprovision/worker.sh ./scripts/deprovision/controlplane.sh ./scripts/deprovision/cluster.sh @@ -106,8 +106,10 @@ controlplane automatically. The pivoting process described above is realized in `ansible` scripts [move.yml](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/roles/v1aX_integration_test/tasks/move.yml) -and [move_back.yml](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/roles/v1aX_integration_test/tasks/move_back.yml). Under the hood, pivoting uses the -`move` command from [clusterctl](https://cluster-api.sigs.k8s.io/clusterctl/commands/move.html) +and +[move_back.yml](https://github.com/metal3-io/metal3-dev-env/blob/master/vm-setup/roles/v1aX_integration_test/tasks/move_back.yml). +Under the hood, pivoting uses the `move` command from +[clusterctl](https://cluster-api.sigs.k8s.io/clusterctl/commands/move.html) provided by Cluster-API. As stated earlier, all the PRs that go into any Metal3 repository where the diff --git a/_posts/2022-07-08-One_cluster_multiple_providers.md b/_posts/2022-07-08-One_cluster_multiple_providers.md index 151188e45..4390c5dd2 100644 --- a/_posts/2022-07-08-One_cluster_multiple_providers.md +++ b/_posts/2022-07-08-One_cluster_multiple_providers.md @@ -6,41 +6,60 @@ categories: ["metal3", "cluster API", "provider", "hybrid", "edge"] author: Lennart Jern --- -Running on bare metal has both benefits and drawbacks. -You can get the best performance possible out of the hardware, but it can also be quite expensive and maybe not necessary for _all_ workloads. -Perhaps a hybrid cluster could give you the best of both? -Raw power for the workload that needs it, and cheap virtualized commodity for the rest. -This blog post will show how to set up a cluster like this using the Cluster API backed by the Metal3 and BYOH providers. +Running on bare metal has both benefits and drawbacks. You can get the +best performance possible out of the hardware, but it can also be quite +expensive and maybe not necessary for _all_ workloads. Perhaps a hybrid +cluster could give you the best of both? Raw power for the workload that +needs it, and cheap virtualized commodity for the rest. This blog post +will show how to set up a cluster like this using the Cluster API backed +by the Metal3 and BYOH providers. ## The problem -Imagine that you have some bare metal servers that you want to use for some specific workload. -Maybe the workload benefits from the specific hardware or there are some requirements that make it necessary to run it there. 
-The rest of the organization already uses Kubernetes and the cluster API everywhere so of course you want the same for this as well. +Imagine that you have some bare metal servers that you want to use for +some specific workload. Maybe the workload benefits from the specific +hardware or there are some requirements that make it necessary to run it +there. The rest of the organization already uses Kubernetes and the +cluster API everywhere so of course you want the same for this as well. Perfect, grab Metal³ and start working! -But hold on, this would mean that you use some of the servers for running the Kubernetes control plane and possibly all the cluster API controllers. -If there are enough servers this is probably not an issue, but do you really want to "waste" these servers on such generic workloads that could be running anywhere? -This can become especially painful if you need multiple control plane nodes. -Each server is probably powerful enough to run all the control planes and controllers, but it would be a single point of failure... +But hold on, this would mean that you use some of the servers for +running the Kubernetes control plane and possibly all the cluster API +controllers. If there are enough servers this is probably not an issue, +but do you really want to "waste" these servers on such generic +workloads that could be running anywhere? This can become especially +painful if you need multiple control plane nodes. Each server is +probably powerful enough to run all the control planes and controllers, +but it would be a single point of failure... -What if there was a way to use a different cluster API infrastructure provider for some nodes? -For example, use the Openstack infrastructure provider for the control plane and Metal³ for the workers. -Let's do an experiment! +What if there was a way to use a different cluster API infrastructure +provider for some nodes? For example, use the Openstack infrastructure +provider for the control plane and Metal³ for the workers. Let's do an +experiment! ## Setting up the experiment environment -This blog post will use the [Bring your own host](https://github.com/vmware-tanzu/cluster-api-provider-bringyourownhost) (BYOH) provider together with Metal³ as a proof of concept to show what is currently possible. +This blog post will use the [Bring your own +host](https://github.com/vmware-tanzu/cluster-api-provider-bringyourownhost) +(BYOH) provider together with Metal³ as a proof of concept to show what +is currently possible. The BYOH provider was chosen as the second provider for two reasons: -1. Due to its design (you provision the host yourself), it is very easy to adapt it to the test (e.g. use a VM in the same network that the metal3-dev-env uses). -2. It is one of the providers that is known to work when combining multiple providers for a single cluster. - -We will be using the [metal3-dev-env](https://github.com/metal3-io/metal3-dev-env) on Ubuntu as a starting point for this experiment. -Note that it makes substantial changes to the machine where it is running, so you may want to use a dedicated lab machine instead of your laptop for this. -If you have not done so already, clone it and run `make`. -This should give you a management cluster with the Metal³ provider installed and two BareMetalHosts ready for provisioning. +1. Due to its design (you provision the host yourself), it is very easy + to adapt it to the test (e.g. use a VM in the same network that the + metal3-dev-env uses). +2. 
It is one of the providers that is known to work when combining + multiple providers for a single cluster. + +We will be using the +[metal3-dev-env](https://github.com/metal3-io/metal3-dev-env) on Ubuntu +as a starting point for this experiment. Note that it makes substantial +changes to the machine where it is running, so you may want to use a +dedicated lab machine instead of your laptop for this. If you have not +done so already, clone it and run `make`. This should give you a +management cluster with the Metal³ provider installed and two +BareMetalHosts ready for provisioning. The next step is to add the BYOH provider and a ByoHost. @@ -86,9 +105,12 @@ Vagrant.configure("2") do |config| end ``` -Vagrant should now have created a new VM to use as a ByoHost. -Now we just need to run the BYOH agent in the VM to make it register as a ByoHost in the management cluster. -The BYOH agent needs a kubeconfig file to do this, so we start by copying it to the VM: +Vagrant should now have created a new VM to use as a ByoHost. Now we +just need to run the BYOH agent in the VM to make it register as a +ByoHost in the management cluster. The BYOH agent needs a kubeconfig +file to do this, so we start by copying it to the VM: + + ```bash {%- comment -%} @@ -104,8 +126,12 @@ scp -i .vagrant/machines/control-plane1/libvirt/private_key \ {% endraw %} ``` + + Next, install the prerequisites and host agent in the VM and run it. + + ```bash vagrant ssh sudo apt install -y socat ebtables ethtool conntrack @@ -115,7 +141,10 @@ chmod +x byoh-hostagent sudo ./byoh-hostagent --namespace metal3 --kubeconfig management-cluster.conf ``` -You should now have a management cluster with both the Metal³ and BYOH providers installed, as well as two BareMetalHosts and one ByoHost. + + +You should now have a management cluster with both the Metal³ and BYOH +providers installed, as well as two BareMetalHosts and one ByoHost. ```console $ kubectl -n metal3 get baremetalhosts,byohosts @@ -130,9 +159,9 @@ byohost.infrastructure.cluster.x-k8s.io/control-plane1 73s ## Creating a multi-provider cluster -The trick is to create both a Metal3Cluster and a ByoCluster that are owned by one common Cluster. -We will use the ByoCluster for the control plane in this case. -First the Cluster: +The trick is to create both a Metal3Cluster and a ByoCluster that are +owned by one common Cluster. We will use the ByoCluster for the control +plane in this case. First the Cluster: ```yaml apiVersion: cluster.x-k8s.io/v1beta1 @@ -165,11 +194,15 @@ Add the rest of the BYOH manifests to get a control plane. The code is collapsed here for easier reading. Please click on the line below to expand it. + +
KubeadmControlPlane, ByoCluster and ByoMachineTemplate
+ + ```yaml {%- raw %} apiVersion: controlplane.cluster.x-k8s.io/v1beta1 @@ -284,10 +317,12 @@ spec:
-So far this is a "normal" Cluster backed by the BYOH provider. -But now it is time to do something different. -Instead of adding more ByoHosts as workers, we will add a Metal3Cluster and MachineDeployment backed by BareMetalHosts! -Note that the `controlPlaneEndpoint` of the Metal3Cluster must point to the same endpoint that the ByoCluster is using. +So far this is a "normal" Cluster backed by the BYOH provider. But now +it is time to do something different. Instead of adding more ByoHosts as +workers, we will add a Metal3Cluster and MachineDeployment backed by +BareMetalHosts! Note that the `controlPlaneEndpoint` of the +Metal3Cluster must point to the same endpoint that the ByoCluster is +using. ```yaml apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 @@ -301,10 +336,14 @@ spec: noCloudProvider: true ``` + +
IPPools
+ + ```yaml apiVersion: ipam.metal3.io/v1alpha1 kind: IPPool @@ -335,9 +374,10 @@ spec:
-These manifests are quite large but they are just the same as would be used by the metal3-dev-env with some name changes here and there. -The key thing to note is that all references to a Cluster are to the one we defined above. -Here is the MachineDeployment: +These manifests are quite large but they are just the same as would be +used by the metal3-dev-env with some name changes here and there. The +key thing to note is that all references to a Cluster are to the one we +defined above. Here is the MachineDeployment: ```yaml apiVersion: cluster.x-k8s.io/v1beta1 @@ -374,14 +414,19 @@ spec: version: v1.23.5 ``` -Finally, we add the Metal3MachineTemplate, Metal3DataTemplate and KubeadmConfigTemplate. -Here you may want to add your public ssh key in the KubeadmConfigTemplate (the last few lines). +Finally, we add the Metal3MachineTemplate, Metal3DataTemplate and +KubeadmConfigTemplate. Here you may want to add your public ssh key in +the KubeadmConfigTemplate (the last few lines). + +
Metal3MachineTemplate, Metal3DataTemplate and KubeadmConfigTemplate
+ + ```yaml {%- raw %} apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 @@ -498,7 +543,8 @@ spec:
-The result of all this is a Cluster with two Machines, one from the Metal³ provider and one from the BYOH provider. +The result of all this is a Cluster with two Machines, one from the +Metal³ provider and one from the BYOH provider. ```console $ k -n metal3 get machine @@ -525,7 +571,8 @@ control-plane1 Ready control-plane,master 88m v1.23.5 test1-8767dbccd-24cl5 Ready 82m v1.23.5 ``` -Going back to the management cluster, we can inspect the state of the cluster API resources. +Going back to the management cluster, we can inspect the state of the +cluster API resources. ```console $ clusterctl -n metal3 describe cluster mixed-cluster @@ -542,37 +589,55 @@ Cluster/mixed-cluster True ## Conclusion -As we have seen in this post, it is possible to combine at least some infrastructure providers when creating a single cluster. -This can be useful for example if a provider has a high cost or limited resources. -Furthermore, the use case is not addressed by MachineDeployments since they would all be from the same provider (even though they can have different properties). - -There is some room for development and improvement though. -The most obvious thing is perhaps that Clusters only have one `infrastructureRef`. -This means that the cluster API controllers are not aware of the "secondary" infrastructure provider(s). - -Another thing that may be less obvious is the reliance on Nodes and Machines in the Kubeadm control plane provider. -It is not an issue in the example we have seen here since both Metal³ and BYOH creates Nodes. -However, there are some projects where Nodes are unnecessary. -See for example [Kamaji](https://github.com/clastix/kamaji), which aims to integrate with the cluster API. -The idea here is to run the control plane components in the management cluster as Pods. -Naturally, there would not be any control plane Nodes or Machines in this case. -(A second provider would be used to add workers.) -But the Kubeadm control plane provider expects there to be both Machines and Nodes for the control plane, so a new provider is likely needed to make this work as desired. - -This issue can already be seen in the [vcluster](https://github.com/loft-sh/cluster-api-provider-vcluster) provider, where the Cluster stays in `Provisioning` state because it is "Waiting for the first control plane machine to have its `status.nodeRef` set". -The idea with vcluster is to reuse the Nodes of the management cluster but provide a separate control plane. -This gives users better isolation than just namespaces without the need for another "real" cluster. -It is for example possible to have different custom resource definitions in each vcluster. -But since vcluster runs all the pods (including the control plane) in the management cluster, there will never be a control plane Machine or `nodeRef`. - -There is already one implementation of a control plane provider without Nodes, i.e. the EKS provider. -Perhaps this is the way forward. -One implementation for each specific case. -It would be nice if it was possible to do it in a more generic way though, similar to how the Kubeadm control plane provider is used by almost all infrastructure providers. - -To summarize, there is already some support for mixed clusters with multiple providers. -However, there are some issues that make it unnecessarily awkward. -Two things that could be improved in the cluster API would be the following: - -1. Make the `cluster.infrastructureRef` into a list to allow multiple infrastructure providers to be registered. -2. 
Drop the assumption that there will always be control plane Machines and Nodes (e.g. by implementing a new control plane provider). +As we have seen in this post, it is possible to combine at least some +infrastructure providers when creating a single cluster. This can be +useful for example if a provider has a high cost or limited resources. +Furthermore, the use case is not addressed by MachineDeployments since +they would all be from the same provider (even though they can have +different properties). + +There is some room for development and improvement though. The most +obvious thing is perhaps that Clusters only have one +`infrastructureRef`. This means that the cluster API controllers are not +aware of the "secondary" infrastructure provider(s). + +Another thing that may be less obvious is the reliance on Nodes and +Machines in the Kubeadm control plane provider. It is not an issue in +the example we have seen here since both Metal³ and BYOH creates Nodes. +However, there are some projects where Nodes are unnecessary. See for +example [Kamaji](https://github.com/clastix/kamaji), which aims to +integrate with the cluster API. The idea here is to run the control +plane components in the management cluster as Pods. Naturally, there +would not be any control plane Nodes or Machines in this case. (A second +provider would be used to add workers.) But the Kubeadm control plane +provider expects there to be both Machines and Nodes for the control +plane, so a new provider is likely needed to make this work as desired. + +This issue can already be seen in the +[vcluster](https://github.com/loft-sh/cluster-api-provider-vcluster) +provider, where the Cluster stays in `Provisioning` state because it is +"Waiting for the first control plane machine to have its +`status.nodeRef` set". The idea with vcluster is to reuse the Nodes of +the management cluster but provide a separate control plane. This gives +users better isolation than just namespaces without the need for another +"real" cluster. It is for example possible to have different custom +resource definitions in each vcluster. But since vcluster runs all the +pods (including the control plane) in the management cluster, there will +never be a control plane Machine or `nodeRef`. + +There is already one implementation of a control plane provider without +Nodes, i.e. the EKS provider. Perhaps this is the way forward. One +implementation for each specific case. It would be nice if it was +possible to do it in a more generic way though, similar to how the +Kubeadm control plane provider is used by almost all infrastructure +providers. + +To summarize, there is already some support for mixed clusters with +multiple providers. However, there are some issues that make it +unnecessarily awkward. Two things that could be improved in the cluster +API would be the following: + +1. Make the `cluster.infrastructureRef` into a list to allow multiple + infrastructure providers to be registered. +2. Drop the assumption that there will always be control plane Machines + and Nodes (e.g. by implementing a new control plane provider). 
diff --git a/hack/markdownlint.sh b/hack/markdownlint.sh new file mode 100755 index 000000000..29a8bb0d4 --- /dev/null +++ b/hack/markdownlint.sh @@ -0,0 +1,20 @@ +#!/bin/sh +# markdownlint-cli2 has config file(s) named .markdownlint-cli2.yaml in the repo + +set -eux + +IS_CONTAINER="${IS_CONTAINER:-false}" +CONTAINER_RUNTIME="${CONTAINER_RUNTIME:-podman}" + +# all md files, but ignore .github and node_modules +if [ "${IS_CONTAINER}" != "false" ]; then + markdownlint-cli2 "**/*.md" "#.github" "#node_modules" +else + "${CONTAINER_RUNTIME}" run --rm \ + --env IS_CONTAINER=TRUE \ + --volume "${PWD}:/workdir:ro,z" \ + --entrypoint sh \ + --workdir /workdir \ + docker.io/pipelinecomponents/markdownlint-cli2:0.9.0@sha256:71370df6c967bae548b0bfd0ae313ddf44bfad87da76f88180eff55c6264098c \ + /workdir/hack/markdownlint.sh "$@" +fi diff --git a/hack/shellcheck.sh b/hack/shellcheck.sh new file mode 100755 index 000000000..9c83430fc --- /dev/null +++ b/hack/shellcheck.sh @@ -0,0 +1,19 @@ +#!/bin/sh + +set -eux + +IS_CONTAINER="${IS_CONTAINER:-false}" +CONTAINER_RUNTIME="${CONTAINER_RUNTIME:-podman}" + +if [ "${IS_CONTAINER}" != "false" ]; then + TOP_DIR="${1:-.}" + find "${TOP_DIR}" -path ./vendor -prune -o -name '*.sh' -exec shellcheck -s bash {} \+ +else + "${CONTAINER_RUNTIME}" run --rm \ + --env IS_CONTAINER=TRUE \ + --volume "${PWD}:/workdir:ro,z" \ + --entrypoint sh \ + --workdir /workdir \ + docker.io/koalaman/shellcheck-alpine:v0.9.0@sha256:e19ed93c22423970d56568e171b4512c9244fc75dd9114045016b4a0073ac4b7 \ + /workdir/hack/shellcheck.sh "$@" +fi