diff --git a/CHANGELOG-0.1.1.md b/CHANGELOG-0.1.1.md deleted file mode 100644 index cfcd3661b9..0000000000 --- a/CHANGELOG-0.1.1.md +++ /dev/null @@ -1,13 +0,0 @@ -# Changelog 0.1.1 - -## [0.1.1] - 2018-11-21 - -### Added - -- Alerts (AlertManager) installation and configuration -- HA Proxy - TLS Termination with multiple addresses and certificates -- PostgreSQL installation and configuration - -### Changed - -- Automatic restart of RedHat/Debian cluster diff --git a/CHANGELOG-0.1.2.md b/CHANGELOG-0.1.2.md deleted file mode 100644 index c4a6a7feb7..0000000000 --- a/CHANGELOG-0.1.2.md +++ /dev/null @@ -1,7 +0,0 @@ -# Changelog 0.1.2 - -## [0.1.2] - 2018-12-05 - -### Changed - -- Kubernetes v1.11.5 installation diff --git a/CHANGELOG-0.1.0.md b/CHANGELOG-0.1.md similarity index 65% rename from CHANGELOG-0.1.0.md rename to CHANGELOG-0.1.md index 546b57b4ee..0f222cd90b 100644 --- a/CHANGELOG-0.1.0.md +++ b/CHANGELOG-0.1.md @@ -1,6 +1,24 @@ # Changelog 0.1 -## [0.1.0] - 2018-10-19 +## [0.1.2] 2018-12-05 + +### Changed + +- Kubernetes v1.11.5 installation + +## [0.1.1] 2018-11-21 + +### Added + +- Alerts (AlertManager) installation and configuration +- HA Proxy - TLS Termination with multiple addresses and certificates +- PostgreSQL installation and configuration + +### Changed + +- Automatic restart of RedHat/Debian cluster + +## [0.1.0] 2018-10-19 ### Added @@ -21,3 +39,4 @@ - Log rotation for: Kibana, Kubernetes, Elasticsearch, HAProxy, Kafka - Automation for firewall rules on RedHat - Create VPN network on Azure using Epiphany + diff --git a/CHANGELOG-0.3.md b/CHANGELOG-0.3.md new file mode 100644 index 0000000000..be8dcba995 --- /dev/null +++ b/CHANGELOG-0.3.md @@ -0,0 +1,26 @@ +# Changelog 0.3 + +## [0.3.0] 2019-07-31 + +### Added + +- Support for AWS cloud platform +- New Python based CLI - epicli. 
Currently supports AWS and baremetal deployments only +- Kubernetes automatic upgrade (experimental) +- Server spec tests for cluster components +- Added Canal as network plugin for Kubernetes +- Improved security + +### Changed + +- Kubernetes version 1.14.4 +- Documentation cleanup and updates + +### Fixed + +- Fixed vulnerabilities for KeyCloak examples + +### Known issues + +- Deployment/Application role fails because Kubernetes cluster is not ready after reboot. More info [here](https://github.com/epiphany-platform/epiphany/issues/407) +- Node_exporter ports are not present in defaults resulting in Prometheus not being able to scrape data with minimal cluster data.yaml. More info [here](https://github.com/epiphany-platform/epiphany/issues/410) \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index ce7bdf332e..341b42f495 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,15 +5,25 @@ All notable changes to this project will be documented in linked files. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 
+Reference for actual cluster component versions can be found [here](docs/home/COMPONENTS.md) + ## Current release +### 0.3.x + +- [CHANGELOG-0.3.0](./CHANGELOG-0.3.md#030-2019-07-31) + +## Older releases + +### 0.2.x + - [CHANGELOG-0.2.3](./CHANGELOG-0.2.md#023-2019-05-20) - [CHANGELOG-0.2.2](./CHANGELOG-0.2.md#022-2019-03-29) - [CHANGELOG-0.2.1](./CHANGELOG-0.2.md#021-2019-03-07) - [CHANGELOG-0.2.0](./CHANGELOG-0.2.md#020-2019-02-19) -## Older releases +### 0.1.x -- [CHANGELOG-0.1.2](./CHANGELOG-0.1.2.md) -- [CHANGELOG-0.1.1](./CHANGELOG-0.1.1.md) -- [CHANGELOG-0.1.0](./CHANGELOG-0.1.0.md) +- [CHANGELOG-0.1.2](./CHANGELOG-0.1.md#012-2018-12-05) +- [CHANGELOG-0.1.1](./CHANGELOG-0.1.md#011-2018-11-21) +- [CHANGELOG-0.1.0](./CHANGELOG-0.1.md#010-2018-10-19) diff --git a/LICENSES.md b/LICENSES.md deleted file mode 100644 index 744fe2e55f..0000000000 --- a/LICENSES.md +++ /dev/null @@ -1,627 +0,0 @@ -# Licenses of Epiphany dependencies - -## flannel - -[Github repository](https://github.com/coreos/flannel/) - -``` - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -``` - -## kops - -[Github repository](http://github.com/kubernetes/kops/) - -### License text: -``` - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -``` - -## prometheus - -[Github repository](https://github.com/prometheus/prometheus/) - -### License text: -``` - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -``` diff --git a/README.md b/README.md index b648e06820..0165763cf7 100644 --- a/README.md +++ b/README.md @@ -2,14 +2,55 @@ ## Overview -Epiphany at its core is a full automation of Kubernetes and Docker plus additional builtin services such as Kafka for high speed messaging/events, Prometheus for monitoring and Graphana for dashboards, Elasticsearch and Kibana for centralized logging. Other optional services are being evaluated now. +Epiphany at its core is a full automation of Kubernetes and Docker plus additional builtin services/components like: + +- Kafka or RabbitMQ for high speed messaging/events +- Prometheus and Alertmanager for monitoring with Graphana for visualization +- Elasticsearch and Kibana for centralized logging +- HAProxy for loadbalancing +- Postgress for storage +- KeyCloak for authentication Epiphany can run on as few as one node (laptop, desktop, server) but the real value comes from running 3 or more nodes for scale and HA. Nodes can be added or removed at will depending on data in the manifest. Everything is data driven so simply changing the manifest data and running the automation will modify the environment. We currently use Terraform and Ansible for our automation orchestration. All automation is idempotent so you can run it as many times as you wish and it will maintain the same state unless you change the data. 
If someone makes a "snow flake" change to the environment (you should never do this) then simply running the automation again will put the environment back to the desired state.
+
+## Legacy note
+
+In Epiphany 0.3 a new CLI tool was introduced (epicli) for deploying and managing clusters and currently supports AWS and bare metal deployment. Azure support will be added soon in a subsequent release but for now, if you need to deploy a cluster on Azure, use the older Legacy engine.
+
 ## Quickstart
 
+### Epicli
+
+Use the following command to see a full run-down of all commands and flags:
+
+```shell
+epicli --help
+```
+
+Generate a new minimum cluster definition:
+
+```shell
+epicli init -p aws -n demo
+```
+
+This minimum file definition is fine to start with; if you need more control over the infrastructure created you can also create a full definition:
+
+```shell
+epicli init -p aws -n demo --full
+```
+
+You will need to modify a few values (like your AWS secrets, directory path for ssh keys). Once you are done with `demo.yaml` you can start cluster deployment by executing:
+
+```shell
+epicli apply -f demo.yaml
+```
+
+Find more information using table of contents below - especially the [How-to guides](docs/home/HOWTO.md).
+
+### Legacy
+
 Fork the `epiphany` repository and modify the yaml's under `core/data/` directory. For example in `data/azure/infrastructure/epiphany-playground/basic-data.yaml` file you will need to modify a few values (like you Azure subscription name, directory path for ssh keys).
Once you are done done with `basic-data.yaml` you can execute Epiphany with the command: ```shell @@ -41,7 +82,7 @@ Find more information using table of contents below - especially the [How-to gui - [How-to contribute](docs/home/CONTRIBUTING.md) - [Workflow to follow](docs/home/GITWORKFLOW.md) - [Governance model](docs/home/GOVERNANCE.md) - - [Notices](docs/home/NOTICES.md) + - [Components](docs/home/COMPONENTS.md) - [Changelog](CHANGELOG.md) diff --git a/core/README.md b/core/README.md deleted file mode 100644 index 29e95a9ad9..0000000000 --- a/core/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Core - -This folder contains the Epiphany core. - -For the full story, go to [Epiphany documentation](../README.md). \ No newline at end of file diff --git a/core/bin/gen_docs.sh b/core/bin/gen_docs.sh deleted file mode 100755 index 17ddfd56c3..0000000000 --- a/core/bin/gen_docs.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash -# -# Copyright 2019 ABB. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# gen_docs.sh creates the doc subdirectory trees and then copies the required *.md files to the given folder overridding -# whatever is those doc directories. The VSTS Wiki allow for documentation from *.md files so restricting to only /docs -# folder allows for a more controlled environment. - -# NOTE: There needs to be templates created and data entered for the docs just like the other areas of Epiphany. - -# Exit immediately if something goes wrong. 
-set -eu - -# Get the root of the Epiphany repo -export REPO_ROOT=$(git rev-parse --show-toplevel)/core - -mkdir -p $REPO_ROOT/docs/home -mkdir -p $REPO_ROOT/docs/architecture -mkdir -p $REPO_ROOT/docs/core -mkdir -p $REPO_ROOT/docs/core-extensions -mkdir -p $REPO_ROOT/docs/data -mkdir -p $REPO_ROOT/docs/examples -mkdir -p $REPO_ROOT/docs/extras - -cp $REPO_ROOT/architecture/docs/index.md $REPO_ROOT/docs/architecture/ -cp $REPO_ROOT/core/docs/index.md $REPO_ROOT/docs/core/ -cp $REPO_ROOT/core-extensions/docs/index.md $REPO_ROOT/docs/core-extensions/ -cp $REPO_ROOT/data/docs/index.md $REPO_ROOT/docs/data/ -cp $REPO_ROOT/examples/docs/index.md $REPO_ROOT/docs/examples/ -cp $REPO_ROOT/extras/docs/index.md $REPO_ROOT/docs/extras/ - -cp $REPO_ROOT/*.md $REPO_ROOT/docs/home/ - -echo 'Docs generated...' diff --git a/core/core/docs/README.md b/core/core/docs/README.md deleted file mode 100644 index 03fcd8eed0..0000000000 --- a/core/core/docs/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Core Docs - -Holds the documentation specifically for `core`. This data is used to generate the overall /docs folder for `core` documentation that is also supported by the VSTS Wiki. 
diff --git a/core/core/docs/index.md b/core/core/docs/index.md deleted file mode 100644 index 6961c93188..0000000000 --- a/core/core/docs/index.md +++ /dev/null @@ -1 +0,0 @@ -# Core Index diff --git a/core/core/src/ansible/backup.yml b/core/core/src/ansible/backup.yml new file mode 100644 index 0000000000..ebcfac0653 --- /dev/null +++ b/core/core/src/ansible/backup.yml @@ -0,0 +1,9 @@ +--- +# Ansible playbook for backing up Kubernetes cluster + +- hosts: master + serial: 1 + become: true + become_method: sudo + roles: + - backup diff --git a/core/core/src/ansible/recovery.yml b/core/core/src/ansible/recovery.yml new file mode 100644 index 0000000000..95d34cf4f5 --- /dev/null +++ b/core/core/src/ansible/recovery.yml @@ -0,0 +1,9 @@ +--- +# Ansible playbook for recovering Kubernetes cluster + +- hosts: master + serial: 1 + become: true + become_method: sudo + roles: + - recovery diff --git a/core/core/src/ansible/roles/backup/defaults/main.yml b/core/core/src/ansible/roles/backup/defaults/main.yml new file mode 100644 index 0000000000..7ae2f6b6dd --- /dev/null +++ b/core/core/src/ansible/roles/backup/defaults/main.yml @@ -0,0 +1,2 @@ +--- +backup_dir: /home/{{ admin_user.name }}/backupdir diff --git a/core/core/src/ansible/roles/backup/tasks/main.yml b/core/core/src/ansible/roles/backup/tasks/main.yml new file mode 100644 index 0000000000..582290f177 --- /dev/null +++ b/core/core/src/ansible/roles/backup/tasks/main.yml @@ -0,0 +1,69 @@ +--- +- name: Create a backup directory + file: + path: "{{ backup_dir }}" + state: directory + +# Ansible 2.8 +# - name: Backup certificates +# copy: +# src: /etc/kubernetes/pki +# dest: "{{ backup_dir }}/tmp" +# remote_src: yes + +# Ansible 2.7 +- name: Backup certificates + synchronize: + src: /etc/kubernetes/pki + dest: "{{ backup_dir }}/tmp" + recursive: yes + delegate_to: "{{ inventory_hostname }}" + +- name: Get etcd image name + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl get pods 
--all-namespaces -o=jsonpath="{.items[*].spec.containers[?(@.name=='etcd')].image}" + register: etcd_image_name + +- name: Save etcd image name to file + copy: + content: "{{ etcd_image_name.stdout }}" + dest: "{{ backup_dir }}/tmp/etcd_ver.txt" + +- name: Create etcd snapshot + shell: > + docker run -v "{{ backup_dir }}/tmp":/backup \ + --network host \ + --env ETCDCTL_API=3 \ + --rm {{ etcd_image_name.stdout }} \ + etcdctl --endpoints=https://127.0.0.1:2379 \ + --cacert=/backup/pki/etcd/ca.crt \ + --cert=/backup/pki/etcd/healthcheck-client.crt \ + --key=/backup/pki/etcd/healthcheck-client.key \ + snapshot save /backup/etcd-snapshot.db + +- name: Check if kubeadm configuration file exists + stat: + path: /etc/kubeadm/kubeadm-config.yml + register: stat_result + +- name: Backup kubeadm configuration file + copy: + src: /etc/kubeadm/kubeadm-config.yml + dest: "{{ backup_dir }}/tmp" + remote_src: yes + when: stat_result.stat.exists + +- name: Set variable with current timestamp + set_fact: timestamp="{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + +- name: Create a tar gz archive + archive: + path: "{{ backup_dir }}/tmp/" + dest: "{{ backup_dir }}/k8s_backup_{{ timestamp }}.tar.gz" + format: gz + +- name: Clean temporary directory + file: + state: absent + path: "{{ backup_dir }}/tmp/" diff --git a/core/core/src/ansible/roles/docker/tasks/Debian.yml b/core/core/src/ansible/roles/docker/tasks/Debian.yml index 7dddbaf9b9..2d1c4b315f 100644 --- a/core/core/src/ansible/roles/docker/tasks/Debian.yml +++ b/core/core/src/ansible/roles/docker/tasks/Debian.yml @@ -1,7 +1,26 @@ --- +- name: Ensure dependencies are installed + apt: + name: + - apt-transport-https + - ca-certificates + state: present + +- name: Add Docker apt key + apt_key: + url: "https://download.docker.com/linux/{{ ansible_distribution|lower }}/gpg" + id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88 + state: present + register: add_repository_key + +- name: Add Docker-CE stable repo + apt_repository: + repo: "deb 
[arch=amd64] https://download.docker.com/linux/{{ ansible_distribution|lower }} {{ ansible_distribution_release }} stable" + state: present + update_cache: true + - name: Install Docker apt: - name: docker.io + name: "docker-ce=5:18.09.6~3-0~{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}" state: present - update_cache: yes - when: ansible_os_family == "Debian" + update_cache: true \ No newline at end of file diff --git a/core/core/src/ansible/roles/docker/tasks/RedHat.yml b/core/core/src/ansible/roles/docker/tasks/RedHat.yml index 6c79738ab0..eeee04aa9d 100644 --- a/core/core/src/ansible/roles/docker/tasks/RedHat.yml +++ b/core/core/src/ansible/roles/docker/tasks/RedHat.yml @@ -7,6 +7,14 @@ gpgkey: https://download.docker.com/linux/centos/gpg gpgcheck: yes +# Workaround for problems with container selinux. +# Todo: Investigate problems with container selinux on Azure. +- name: Install container packages + yum: + name: http://mirror.centos.org/centos/7/extras/x86_64/Packages/container-selinux-2.95-2.el7_6.noarch.rpm + state: present + update_cache: yes + - name: Install Docker yum: name: docker-ce-18.06.3.ce-3.el7 diff --git a/core/core/src/ansible/roles/haproxy_exporter/templates/prometheus-haproxy-exporter.service.j2 b/core/core/src/ansible/roles/haproxy_exporter/templates/prometheus-haproxy-exporter.service.j2 index d60fe11a4f..9d41e90027 100644 --- a/core/core/src/ansible/roles/haproxy_exporter/templates/prometheus-haproxy-exporter.service.j2 +++ b/core/core/src/ansible/roles/haproxy_exporter/templates/prometheus-haproxy-exporter.service.j2 @@ -4,7 +4,7 @@ Description=Service that runs Prometheus Node Exporter [Service] User=haproxy_exporter Group=haproxy_exporter -ExecStart=/opt/haproxy_exporter/haproxy_exporter "--haproxy.scrape-uri=https://{{ haproxy.stats.user }}:{{ haproxy.stats.password }}@{{ ansible_default_ipv4.address }}/haproxy?stats" +ExecStart=/opt/haproxy_exporter/haproxy_exporter "--haproxy.scrape-uri=http://{{ 
haproxy.stats.user }}:{{ haproxy.stats.password }}@127.0.0.1:9000/haproxy?stats" SyslogIdentifier=prometheus_haproxy_exporter Restart=always diff --git a/core/core/src/ansible/roles/haproxy_tls_termination/templates/haproxy_Debian.cfg.j2 b/core/core/src/ansible/roles/haproxy_tls_termination/templates/haproxy_Debian.cfg.j2 index 82229e2376..55f32f599f 100644 --- a/core/core/src/ansible/roles/haproxy_tls_termination/templates/haproxy_Debian.cfg.j2 +++ b/core/core/src/ansible/roles/haproxy_tls_termination/templates/haproxy_Debian.cfg.j2 @@ -10,6 +10,12 @@ global user haproxy group haproxy daemon + + ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + ssl-default-bind-options no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets + + ssl-default-server-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets defaults log global @@ -22,8 +28,7 @@ defaults {%- if haproxy is defined and haproxy.http_request_timeout is defined %} timeout http-request {{ haproxy.http_request_timeout }} {%- endif %} - - + {%- for front in haproxy.frontend %} frontend {{ front.name }} {%- if front.https == True %} @@ -32,13 +37,6 @@ frontend {{ front.name }} {%- if front.https == False %} bind *:{{ front.port }} {%- endif %} - - {%- if haproxy.stats is defined %} - {%- if haproxy.stats.enable %} - stats uri /haproxy?stats - stats auth {{ haproxy.stats.user }}:{{ haproxy.stats.password }} - {%- endif %} - {%- endif %} {%- if (front.domain_backend_mapping is defined) and (front.domain_backend_mapping > 0) %} {%- if front.https == True %} @@ -46,10 +44,10 @@ 
frontend {{ front.name }} acl {{ mapping.backend }} ssl_fc_sni {{ mapping.domain }} {%- endfor %} {%- endif %} - {%- if front.https == False %} - {%- for mapping in front.domain_backend_mapping %} - acl {{ mapping.backend }} hdr_dom(host) -i {{ mapping.domain }} - {%- endfor %} + {%- if front.https == False %} + {%- for mapping in front.domain_backend_mapping %} + acl {{ mapping.backend }} hdr_dom(host) -i {{ mapping.domain }} + {%- endfor %} {%- endif %} {%- endif %} @@ -70,7 +68,7 @@ backend {{ back.name }} {%- if back.server_groups is defined %} {%- for server_group in back.server_groups %} {%- for server in groups[server_group] %} - server {{ server }} {{ hostvars[server]['ansible_default_ipv4']['address'] }}:{{ back.port }} check + server {{ server }} {{ hostvars[server]['ansible_default_ipv4']['address'] }}:{{ back.port }} check {%- endfor %} {%- endfor %} {%- endif %} @@ -81,3 +79,16 @@ backend {{ back.name }} {%- endfor %} {%- endif %} {%- endfor %} + +{%- if haproxy.stats is defined %} + {%- if haproxy.stats.enable %} +listen stats + bind 127.0.0.1:9000 + stats enable + stats refresh 10s + stats admin if { src 127.0.0.1 } + stats hide-version # Hide HAProxy version + stats uri /haproxy?stats + stats auth {{ haproxy.stats.user }}:{{ haproxy.stats.password }} + {%- endif %} +{%- endif %} diff --git a/core/core/src/ansible/roles/haproxy_tls_termination/templates/haproxy_RedHat.cfg.j2 b/core/core/src/ansible/roles/haproxy_tls_termination/templates/haproxy_RedHat.cfg.j2 index 45b3409e30..a0327576ba 100644 --- a/core/core/src/ansible/roles/haproxy_tls_termination/templates/haproxy_RedHat.cfg.j2 +++ b/core/core/src/ansible/roles/haproxy_tls_termination/templates/haproxy_RedHat.cfg.j2 @@ -10,6 +10,12 @@ global user haproxy group haproxy daemon + + ssl-default-bind-ciphers 
ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + ssl-default-bind-options no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets + + ssl-default-server-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets defaults log global @@ -34,13 +40,6 @@ frontend {{ front.name }} {%- if front.https == False %} bind *:{{ front.port }} {%- endif %} - - {%- if haproxy.stats is defined %} - {%- if haproxy.stats.enable %} - stats uri /haproxy?stats - stats auth {{ haproxy.stats.user }}:{{ haproxy.stats.password }} - {%- endif %} - {%- endif %} {%- if (front.domain_backend_mapping is defined) and (front.domain_backend_mapping > 0) %} {%- if front.https == True %} @@ -85,3 +84,16 @@ backend {{ back.name }} {%- endif %} {%- endfor %} + +{%- if haproxy.stats is defined %} + {%- if haproxy.stats.enable %} +listen stats + bind 127.0.0.1:9000 + stats enable + stats refresh 10s + stats admin if { src 127.0.0.1 } + stats hide-version # Hide HAProxy version + stats uri /haproxy?stats + stats auth {{ haproxy.stats.user }}:{{ haproxy.stats.password }} + {%- endif %} +{%- endif %} diff --git a/core/core/src/ansible/roles/kubernetes-common/tasks/configure-kubelet.yml b/core/core/src/ansible/roles/kubernetes-common/tasks/configure-kubelet.yml index 156388ff1f..ce30d5e892 100644 --- a/core/core/src/ansible/roles/kubernetes-common/tasks/configure-kubelet.yml +++ b/core/core/src/ansible/roles/kubernetes-common/tasks/configure-kubelet.yml @@ -1,6 +1,12 @@ --- # These tasks are run from master and worker roles +- name: Ensure kubelet drop-in directory exists + become: true + file: + 
path: /etc/systemd/system/kubelet.service.d + state: directory + - name: Copy kubelet configuration file (11-cgroup.conf) template: src: 11-cgroup.conf.j2 diff --git a/core/core/src/ansible/roles/kubernetes-common/tasks/install-packages.yml b/core/core/src/ansible/roles/kubernetes-common/tasks/install-packages.yml index 85709e504a..2120a49d8f 100644 --- a/core/core/src/ansible/roles/kubernetes-common/tasks/install-packages.yml +++ b/core/core/src/ansible/roles/kubernetes-common/tasks/install-packages.yml @@ -13,7 +13,6 @@ - name: Install Kubernetes packages for RedHat family yum: name: - - kubernetes-cni-0.6.0-0 # todo remove it in future release - kubelet-{{kubernetes.version}} - kubectl-{{kubernetes.version}} - kubeadm-{{kubernetes.version}} @@ -24,7 +23,6 @@ - name: Install Kubernetes packages for Debian family apt: name: - - kubernetes-cni=0.6.0-00 # todo remove it in future release - kubelet={{kubernetes.version}}-00 - kubectl={{kubernetes.version}}-00 - kubeadm={{kubernetes.version}}-00 diff --git a/core/core/src/ansible/roles/master/defaults/main.yml b/core/core/src/ansible/roles/master/defaults/main.yml index 06f674d130..a16f3db29a 100644 --- a/core/core/src/ansible/roles/master/defaults/main.yml +++ b/core/core/src/ansible/roles/master/defaults/main.yml @@ -16,3 +16,6 @@ kubelet_custom_config: systemReserved: cpu: 50m memory: 768Mi # based on RedHat 7.5 on Standard_DS1_v2 Azure VM with =~ 30 pods + +kubernetes_specification: + cni_plugin: flannel \ No newline at end of file diff --git a/core/core/src/ansible/roles/master/files/calico.yml b/core/core/src/ansible/roles/master/files/calico.yml new file mode 100644 index 0000000000..68233a3b7e --- /dev/null +++ b/core/core/src/ansible/roles/master/files/calico.yml @@ -0,0 +1,782 @@ +--- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Calico installation. 
+kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # Typha is disabled. + typha_service_name: "none" + # Configure the backend to use. + calico_backend: "bird" + + # Configure the MTU to use + veth_mtu: "1440" + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + } + ] + } + +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: FelixConfiguration + plural: felixconfigurations + singular: felixconfiguration +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMBlock + plural: ipamblocks + singular: ipamblock + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BlockAffinity + plural: blockaffinities + singular: blockaffinity + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + 
names: + kind: IPAMHandle + plural: ipamhandles + singular: ipamhandle + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMConfig + plural: ipamconfigs + singular: ipamconfig + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPPeer + plural: bgppeers + singular: bgppeer + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPConfiguration + plural: bgpconfigurations + singular: bgpconfiguration + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPPool + plural: ippools + singular: ippool + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: HostEndpoint + plural: hostendpoints + singular: hostendpoint + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: ClusterInformation + plural: clusterinformations + singular: clusterinformation + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: 
+ kind: GlobalNetworkPolicy + plural: globalnetworkpolicies + singular: globalnetworkpolicy + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkSet + plural: globalnetworksets + singular: globalnetworkset + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkPolicy + plural: networkpolicies + singular: networkpolicy + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkSet + plural: networksets + singular: networkset +--- +# Source: calico/templates/rbac.yaml + +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +rules: + # Nodes are watched to monitor for deletions. + - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are queried to check for existence. + - apiGroups: [""] + resources: + - pods + verbs: + - get + # IPAM resources are manipulated when nodes are deleted. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - create + - update +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system +--- +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only requried for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. 
+ - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system + +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the calico-node container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-node + annotations: + # This, along with the CriticalAddonsOnly toleration below, + # marks the pod as a critical add-on, ensuring it gets + # priority scheduling and that its resources are reserved + # if it ever gets evicted. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + beta.kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure calico-node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: calico-node + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container performs upgrade from host-local IPAM to calico-ipam. + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. 
+ - name: upgrade-ipam + image: calico/cni:v3.8.0 + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: calico/cni:v3.8.0 + command: ["/install-cni.sh"] + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver + image: calico/pod2daemon-flexvol:v3.8.0 + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + containers: + # Runs calico-node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: calico/node:v3.8.0 + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Wait for the datastore. 
+ - name: WAIT_FOR_DATASTORE + value: "true" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,bgp" + # Auto-detect the BGP IP address. + - name: IP + value: "autodetect" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "Always" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + - name: CALICO_IPV4POOL_CIDR + value: "192.168.0.0/16" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPv6 on Kubernetes. 
+ - name: FELIX_IPV6SUPPORT + value: "false" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + livenessProbe: + httpGet: + path: /liveness + port: 9099 + host: localhost + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/calico-node + - -bird-ready + - -felix-ready + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + volumes: + # Used by calico-node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. 
+ - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system + +--- +# Source: calico/templates/calico-kube-controllers.yaml + +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + # The controllers can only have a single active instance. + replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + beta.kubernetes.io/os: linux + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + containers: + - name: calico-kube-controllers + image: calico/kube-controllers:v3.8.0 + env: + # Choose which controllers to run. 
+ - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system +--- +# Source: calico/templates/calico-etcd-secrets.yaml + +--- +# Source: calico/templates/calico-typha.yaml + +--- +# Source: calico/templates/configure-canal.yaml \ No newline at end of file diff --git a/core/core/src/ansible/roles/master/files/canal.yml b/core/core/src/ansible/roles/master/files/canal.yml new file mode 100644 index 0000000000..86013243ef --- /dev/null +++ b/core/core/src/ansible/roles/master/files/canal.yml @@ -0,0 +1,606 @@ +--- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Canal installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: canal-config + namespace: kube-system +data: + # Typha is disabled. + typha_service_name: "none" + # The interface used by canal for host <-> host communication. + # If left blank, then the interface is chosen using the node's + # default route. + canal_iface: "" + + # Whether or not to masquerade traffic to destinations not within + # the pod network. + masquerade: "true" + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "ipam": { + "type": "host-local", + "subnet": "usePodCidr" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + } + ] + } + + # Flannel network configuration. Mounted into the flannel container. 
+ net-conf.json: | + { + "Network": "10.244.0.0/16", + "Backend": { + "Type": "vxlan" + } + } + +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: FelixConfiguration + plural: felixconfigurations + singular: felixconfiguration +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPConfiguration + plural: bgpconfigurations + singular: bgpconfiguration + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPPool + plural: ippools + singular: ippool + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: HostEndpoint + plural: hostendpoints + singular: hostendpoint + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: ClusterInformation + plural: clusterinformations + singular: clusterinformation + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkPolicy + plural: globalnetworkpolicies + singular: globalnetworkpolicy + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: 
CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkSet + plural: globalnetworksets + singular: globalnetworkset + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkPolicy + plural: networkpolicies + singular: networkpolicy + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkSet + plural: networksets + singular: networkset +--- +# Source: calico/templates/rbac.yaml + +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. 
+ - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only requried for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update +--- +# Flannel ClusterRole +# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel +rules: + - apiGroups: [""] + resources: + - pods + verbs: + - get + - apiGroups: [""] + resources: + - nodes + verbs: + - list + - watch + - apiGroups: [""] + resources: + - nodes/status + verbs: + - patch +--- +# Bind the flannel ClusterRole to the canal ServiceAccount. 
+kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: canal-flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: canal-calico +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system + +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the canal container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: canal + namespace: kube-system + labels: + k8s-app: canal +spec: + selector: + matchLabels: + k8s-app: canal + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: canal + annotations: + # This, along with the CriticalAddonsOnly toleration below, + # marks the pod as a critical add-on, ensuring it gets + # priority scheduling and that its resources are reserved + # if it ever gets evicted. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + beta.kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure canal gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: canal + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. 
+ terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: calico/cni:v3.8.1 + command: ["/install-cni.sh"] + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-canal.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: canal-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver + image: calico/pod2daemon-flexvol:v3.8.1 + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + containers: + # Runs canal container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: calico/node:v3.8.1 + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Configure route aggregation based on pod CIDR. + - name: USE_POD_CIDR + value: "true" + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Don't enable BGP. 
+ - name: CALICO_NETWORKING_BACKEND + value: "none" + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,canal" + # Period, in seconds, at which felix re-applies all iptables state + - name: FELIX_IPTABLESREFRESHINTERVAL + value: "60" + # No IP address needed. + - name: IP + value: "" + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + - name: CALICO_IPV4POOL_CIDR + value: "10.244.0.0/16" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + livenessProbe: + httpGet: + path: /liveness + port: 9099 + host: localhost + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + httpGet: + path: /readiness + port: 9099 + host: localhost + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + # This container runs flannel using the kube-subnet-mgr backend + # for allocating subnets. 
+ - name: kube-flannel + image: quay.io/coreos/flannel:v0.11.0 + command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] + securityContext: + privileged: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: FLANNELD_IFACE + valueFrom: + configMapKeyRef: + name: canal-config + key: canal_iface + - name: FLANNELD_IP_MASQ + valueFrom: + configMapKeyRef: + name: canal-config + key: masquerade + volumeMounts: + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + # Used by canal. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Used by flannel. + - name: flannel-cfg + configMap: + name: canal-config + # Used to install CNI. 
+ - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: canal + namespace: kube-system + +--- +# Source: calico/templates/calico-etcd-secrets.yaml + +--- +# Source: calico/templates/calico-kube-controllers.yaml + +--- +# Source: calico/templates/calico-typha.yaml + +--- +# Source: calico/templates/configure-canal.yaml diff --git a/core/core/src/ansible/roles/master/files/coredns-config.yml b/core/core/src/ansible/roles/master/files/coredns-config.yml index ea4743873f..79b81dc37a 100644 --- a/core/core/src/ansible/roles/master/files/coredns-config.yml +++ b/core/core/src/ansible/roles/master/files/coredns-config.yml @@ -1,3 +1,49 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system --- apiVersion: v1 kind: ConfigMap @@ -9,23 +55,24 @@ data: .:53 { errors health + ready hosts { 
fallthrough } kubernetes cluster.local in-addr.arpa ip6.arpa { - pods insecure - upstream - fallthrough in-addr.arpa ip6.arpa + pods insecure + upstream + fallthrough in-addr.arpa ip6.arpa } prometheus :9153 - proxy . /etc/resolv.conf + forward . /etc/resolv.conf cache 30 loop reload loadbalance } --- -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: coredns @@ -47,6 +94,7 @@ spec: labels: k8s-app: kube-dns spec: + priorityClassName: system-cluster-critical serviceAccountName: coredns tolerations: - key: "CriticalAddonsOnly" @@ -55,7 +103,7 @@ spec: beta.kubernetes.io/os: linux containers: - name: coredns - image: coredns/coredns:1.2.6 + image: coredns/coredns:1.5.0 imagePullPolicy: IfNotPresent resources: limits: @@ -98,6 +146,11 @@ spec: timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP dnsPolicy: Default volumes: - name: config-volume @@ -110,4 +163,30 @@ spec: hostPath: path: /etc/hosts type: File ---- \ No newline at end of file +--- +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: 10.96.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP \ No newline at end of file diff --git a/core/core/src/ansible/roles/master/files/kube-flannel.yml b/core/core/src/ansible/roles/master/files/kube-flannel.yml index 929d1530f2..859d55bb61 100644 --- a/core/core/src/ansible/roles/master/files/kube-flannel.yml +++ b/core/core/src/ansible/roles/master/files/kube-flannel.yml @@ -1,9 +1,60 @@ --- +apiVersion: extensions/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp.flannel.unprivileged + annotations: + 
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default + seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default + apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default + apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default +spec: + privileged: false + volumes: + - configMap + - secret + - emptyDir + - hostPath + allowedHostPaths: + - pathPrefix: "/etc/cni/net.d" + - pathPrefix: "/etc/kube-flannel" + - pathPrefix: "/run/flannel" + readOnlyRootFilesystem: false + # Users and groups + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny + # Privilege Escalation + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + # Capabilities + allowedCapabilities: ['NET_ADMIN'] + defaultAddCapabilities: [] + requiredDropCapabilities: [] + # Host namespaces + hostPID: false + hostIPC: false + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + # SELinux + seLinux: + # SELinux is unsed in CaaSP + rule: 'RunAsAny' +--- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: flannel rules: + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['psp.flannel.unprivileged'] - apiGroups: - "" resources: @@ -103,7 +154,7 @@ spec: serviceAccountName: flannel initContainers: - name: install-cni - image: quay.io/coreos/flannel:v0.10.0-amd64 + image: quay.io/coreos/flannel:v0.11.0-amd64 command: - cp args: @@ -117,7 +168,7 @@ spec: mountPath: /etc/kube-flannel/ containers: - name: kube-flannel - image: quay.io/coreos/flannel:v0.10.0-amd64 + image: quay.io/coreos/flannel:v0.11.0-amd64 command: - /opt/bin/flanneld args: @@ -131,7 +182,9 @@ spec: cpu: "100m" memory: "50Mi" securityContext: - privileged: true + privileged: false + capabilities: + add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: @@ -143,13 +196,13 @@ spec: fieldPath: metadata.namespace volumeMounts: - name: run - 
mountPath: /run + mountPath: /run/flannel - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: - path: /run + path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d @@ -181,7 +234,7 @@ spec: serviceAccountName: flannel initContainers: - name: install-cni - image: quay.io/coreos/flannel:v0.10.0-arm64 + image: quay.io/coreos/flannel:v0.11.0-arm64 command: - cp args: @@ -195,7 +248,7 @@ spec: mountPath: /etc/kube-flannel/ containers: - name: kube-flannel - image: quay.io/coreos/flannel:v0.10.0-arm64 + image: quay.io/coreos/flannel:v0.11.0-arm64 command: - /opt/bin/flanneld args: @@ -209,7 +262,9 @@ spec: cpu: "100m" memory: "50Mi" securityContext: - privileged: true + privileged: false + capabilities: + add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: @@ -221,13 +276,13 @@ spec: fieldPath: metadata.namespace volumeMounts: - name: run - mountPath: /run + mountPath: /run/flannel - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: - path: /run + path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d @@ -259,7 +314,7 @@ spec: serviceAccountName: flannel initContainers: - name: install-cni - image: quay.io/coreos/flannel:v0.10.0-arm + image: quay.io/coreos/flannel:v0.11.0-arm command: - cp args: @@ -273,7 +328,7 @@ spec: mountPath: /etc/kube-flannel/ containers: - name: kube-flannel - image: quay.io/coreos/flannel:v0.10.0-arm + image: quay.io/coreos/flannel:v0.11.0-arm command: - /opt/bin/flanneld args: @@ -287,7 +342,9 @@ spec: cpu: "100m" memory: "50Mi" securityContext: - privileged: true + privileged: false + capabilities: + add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: @@ -299,13 +356,13 @@ spec: fieldPath: metadata.namespace volumeMounts: - name: run - mountPath: /run + mountPath: /run/flannel - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: - path: /run + path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d @@ -337,7 +394,7 @@ spec: 
serviceAccountName: flannel initContainers: - name: install-cni - image: quay.io/coreos/flannel:v0.10.0-ppc64le + image: quay.io/coreos/flannel:v0.11.0-ppc64le command: - cp args: @@ -351,7 +408,7 @@ spec: mountPath: /etc/kube-flannel/ containers: - name: kube-flannel - image: quay.io/coreos/flannel:v0.10.0-ppc64le + image: quay.io/coreos/flannel:v0.11.0-ppc64le command: - /opt/bin/flanneld args: @@ -365,7 +422,9 @@ spec: cpu: "100m" memory: "50Mi" securityContext: - privileged: true + privileged: false + capabilities: + add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: @@ -377,13 +436,13 @@ spec: fieldPath: metadata.namespace volumeMounts: - name: run - mountPath: /run + mountPath: /run/flannel - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: - path: /run + path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d @@ -415,7 +474,7 @@ spec: serviceAccountName: flannel initContainers: - name: install-cni - image: quay.io/coreos/flannel:v0.10.0-s390x + image: quay.io/coreos/flannel:v0.11.0-s390x command: - cp args: @@ -429,7 +488,7 @@ spec: mountPath: /etc/kube-flannel/ containers: - name: kube-flannel - image: quay.io/coreos/flannel:v0.10.0-s390x + image: quay.io/coreos/flannel:v0.11.0-s390x command: - /opt/bin/flanneld args: @@ -443,7 +502,9 @@ spec: cpu: "100m" memory: "50Mi" securityContext: - privileged: true + privileged: false + capabilities: + add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: @@ -455,13 +516,13 @@ spec: fieldPath: metadata.namespace volumeMounts: - name: run - mountPath: /run + mountPath: /run/flannel - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: - path: /run + path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d diff --git a/core/core/src/ansible/roles/master/files/kubernetes-dashboard.yml b/core/core/src/ansible/roles/master/files/kubernetes-dashboard.yml index a5a08b4177..a33a5c2515 100644 --- 
a/core/core/src/ansible/roles/master/files/kubernetes-dashboard.yml +++ b/core/core/src/ansible/roles/master/files/kubernetes-dashboard.yml @@ -12,12 +12,38 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Configuration to deploy release version of the Dashboard UI compatible with -# Kubernetes 1.8. -# -# Example usage: kubectl create -f +apiVersion: v1 +kind: Namespace +metadata: + name: kubernetes-dashboard + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard -# ------------------- Dashboard Secret ------------------- # +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + k8s-app: kubernetes-dashboard + +--- apiVersion: v1 kind: Secret @@ -25,82 +51,130 @@ metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-certs - namespace: kube-system + namespace: kubernetes-dashboard type: Opaque --- -# ------------------- Dashboard Service Account ------------------- # apiVersion: v1 -kind: ServiceAccount +kind: Secret metadata: labels: k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system + name: kubernetes-dashboard-csrf + namespace: kubernetes-dashboard +type: Opaque +data: + csrf: "" + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: kubernetes-dashboard +type: Opaque + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-settings + namespace: kubernetes-dashboard --- -# ------------------- Dashboard Role & Role Binding ------------------- # kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: 
kubernetes-dashboard-minimal - namespace: kube-system + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard rules: - # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret. -- apiGroups: [""] - resources: ["secrets"] - verbs: ["create"] - # Allow Dashboard to create 'kubernetes-dashboard-settings' config map. -- apiGroups: [""] - resources: ["configmaps"] - verbs: ["create"] # Allow Dashboard to get, update and delete Dashboard exclusive secrets. -- apiGroups: [""] - resources: ["secrets"] - resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"] - verbs: ["get", "update", "delete"] - # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. -- apiGroups: [""] - resources: ["configmaps"] - resourceNames: ["kubernetes-dashboard-settings"] - verbs: ["get", "update"] - # Allow Dashboard to get metrics from heapster. -- apiGroups: [""] - resources: ["services"] - resourceNames: ["heapster"] - verbs: ["proxy"] -- apiGroups: [""] - resources: ["services/proxy"] - resourceNames: ["heapster", "http:heapster:", "https:heapster:"] - verbs: ["get"] + - apiGroups: [""] + resources: ["secrets"] + resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] + verbs: ["get", "update", "delete"] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["kubernetes-dashboard-settings"] + verbs: ["get", "update"] + # Allow Dashboard to get metrics. 
+ - apiGroups: [""] + resources: ["services"] + resourceNames: ["heapster", "dashboard-metrics-scraper"] + verbs: ["proxy"] + - apiGroups: [""] + resources: ["services/proxy"] + resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] + verbs: ["get"] --- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list", "watch"] + +--- + apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: kubernetes-dashboard-minimal - namespace: kube-system + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: kubernetes-dashboard-minimal + name: kubernetes-dashboard subjects: -- kind: ServiceAccount + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard + namespace: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole name: kubernetes-dashboard - namespace: kube-system +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kubernetes-dashboard --- -# ------------------- Dashboard Deployment ------------------- # kind: Deployment -apiVersion: apps/v1beta2 +apiVersion: apps/v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard - namespace: kube-system + namespace: kubernetes-dashboard spec: replicas: 1 revisionHistoryLimit: 10 @@ -113,55 +187,95 @@ spec: k8s-app: kubernetes-dashboard spec: containers: - - name: kubernetes-dashboard - image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.0 - ports: - - 
containerPort: 8443 - protocol: TCP - args: - - --auto-generate-certificates - # Uncomment the following line to manually specify Kubernetes API server Host - # If not specified, Dashboard will attempt to auto discover the API server and connect - # to it. Uncomment only if the default does not work. - # - --apiserver-host=http://my-address:port - volumeMounts: - - name: kubernetes-dashboard-certs - mountPath: /certs - # Create on-disk volume to store exec logs - - mountPath: /tmp - name: tmp-volume - livenessProbe: - httpGet: - scheme: HTTPS - path: / - port: 8443 - initialDelaySeconds: 30 - timeoutSeconds: 30 + - name: kubernetes-dashboard + image: kubernetesui/dashboard:v2.0.0-beta1 + imagePullPolicy: Always + ports: + - containerPort: 8443 + protocol: TCP + args: + - --auto-generate-certificates + - --namespace=kubernetes-dashboard + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. 
+ # - --apiserver-host=http://my-address:port + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 volumes: - - name: kubernetes-dashboard-certs - secret: - secretName: kubernetes-dashboard-certs - - name: tmp-volume - emptyDir: {} + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: {} serviceAccountName: kubernetes-dashboard # Comment the following tolerations if Dashboard must not be deployed on master tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule --- -# ------------------- Dashboard Service ------------------- # kind: Service apiVersion: v1 metadata: labels: - k8s-app: kubernetes-dashboard - name: kubernetes-dashboard - namespace: kube-system + k8s-app: kubernetes-metrics-scraper + name: dashboard-metrics-scraper + namespace: kubernetes-dashboard spec: ports: - - port: 443 - targetPort: 8443 + - port: 8000 + targetPort: 8000 selector: - k8s-app: kubernetes-dashboard + k8s-app: kubernetes-metrics-scraper + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-metrics-scraper + name: kubernetes-metrics-scraper + namespace: kubernetes-dashboard +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-metrics-scraper + template: + metadata: + labels: + k8s-app: kubernetes-metrics-scraper + spec: + containers: + - name: kubernetes-metrics-scraper + image: kubernetesui/metrics-scraper:v1.0.0 + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + serviceAccountName: kubernetes-dashboard + # Comment the following 
tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule \ No newline at end of file diff --git a/core/core/src/ansible/roles/master/tasks/single-master.yml b/core/core/src/ansible/roles/master/tasks/single-master.yml index 5e51f7a713..83c0b8cf04 100644 --- a/core/core/src/ansible/roles/master/tasks/single-master.yml +++ b/core/core/src/ansible/roles/master/tasks/single-master.yml @@ -43,6 +43,23 @@ dest: /home/{{ admin_user.name }}/ owner: "{{ admin_user.name }}" group: "{{ admin_user.name }}" + when: kubernetes_specification.cni_plugin == "flannel" + +- name: Copy calico config + copy: + src: calico.yml + dest: /home/{{ admin_user.name }}/ + owner: "{{ admin_user.name }}" + group: "{{ admin_user.name }}" + when: kubernetes_specification.cni_plugin == "calico" + +- name: Copy canal config + copy: + src: canal.yml + dest: /home/{{ admin_user.name }}/ + owner: "{{ admin_user.name }}" + group: "{{ admin_user.name }}" + when: kubernetes_specification.cni_plugin == "canal" - name: Copy dashboard yaml copy: @@ -86,6 +103,17 @@ - name: Apply kube-flannel.yml shell: kubectl apply --kubeconfig=/home/{{ admin_user.name }}/.kube/config -f /home/{{ admin_user.name }}/kube-flannel.yml become_user: "{{ admin_user.name }}" + when: kubernetes_specification.cni_plugin == "flannel" + +- name: Apply calico + shell: kubectl apply --kubeconfig=/home/{{ admin_user.name }}/.kube/config -f /home/{{ admin_user.name }}/calico.yml + become_user: "{{ admin_user.name }}" + when: kubernetes_specification.cni_plugin == "calico" + +- name: Apply canal + shell: kubectl apply --kubeconfig=/home/{{ admin_user.name }}/.kube/config -f /home/{{ admin_user.name }}/canal.yml + become_user: "{{ admin_user.name }}" + when: kubernetes_specification.cni_plugin == "canal" - name: Check if kubernetes-dashboard is already deployed shell: kubectl --kubeconfig=/home/{{ admin_user.name }}/.kube/config get pods --all-namespaces | grep -c -i 
dashboard @@ -142,4 +170,4 @@ become_user: "{{ admin_user.name }}" when: - groups['master'][0] == inventory_hostname - - untaint_master is defined and untaint_master == True \ No newline at end of file + - untaint_master is defined and untaint_master == True diff --git a/core/core/src/ansible/roles/recovery/defaults/main.yml b/core/core/src/ansible/roles/recovery/defaults/main.yml new file mode 100644 index 0000000000..7ae2f6b6dd --- /dev/null +++ b/core/core/src/ansible/roles/recovery/defaults/main.yml @@ -0,0 +1,2 @@ +--- +backup_dir: /home/{{ admin_user.name }}/backupdir diff --git a/core/core/src/ansible/roles/recovery/tasks/main.yml b/core/core/src/ansible/roles/recovery/tasks/main.yml new file mode 100644 index 0000000000..cea8d59c93 --- /dev/null +++ b/core/core/src/ansible/roles/recovery/tasks/main.yml @@ -0,0 +1,132 @@ +--- +- name: Reset kubeadm + shell: kubeadm reset -f + +- name: Create directory for certificates + file: + path: /etc/kubernetes/pki + state: directory + +- name: Create temporary directory + file: + path: "{{ backup_dir }}/tmp" + state: directory + +- name: Get files in a backup directory + find: + paths: "{{ backup_dir }}" + patterns: "k8s_backup_*.tar.gz" + register: found_files + +- name: Get latest file + set_fact: + latest_file: "{{ found_files.files | sort(attribute='mtime',reverse=true) | first }}" + +- name: Unarchive a tar gz archive + unarchive: + src: "{{ latest_file.path }}" + dest: "{{ backup_dir }}/tmp" + remote_src: yes + +# Ansible 2.8 +# - name: Restore certificates +# copy: +# src: "{{ backup_dir }}/tmp/pki/" +# dest: /etc/kubernetes/pki +# remote_src: yes + +# Ansible 2.7 +- name: Restore certificates + synchronize: + src: "{{ backup_dir }}/tmp/pki/" + dest: /etc/kubernetes/pki + recursive: yes + delegate_to: "{{ inventory_hostname }}" + +- name: Create data directory for etcd + file: + path: /var/lib/etcd + state: directory + +- name: Get etcd image name + shell: cat "{{ backup_dir }}/tmp/etcd_ver.txt" + register: 
etcd_image_name + +- name: Restore etcd backup + shell: > + docker run -v "{{ backup_dir }}/tmp":/backup \ + -v /var/lib/etcd:/var/lib/etcd \ + --env ETCDCTL_API=3 \ + --rm "{{ etcd_image_name.stdout }}" \ + /bin/sh -c "etcdctl snapshot restore '/backup/etcd-snapshot.db'; mv /default.etcd/member/ /var/lib/etcd/" + +- name: Check if kubeadm configuration file exists + stat: + path: "{{ backup_dir }}/tmp/kubeadm-config.yml" + register: stat_result + +- name: Create directory for kubeadm configuration file + file: + path: /etc/kubeadm + state: directory + when: stat_result.stat.exists + +- name: Restore kubeadm configuration file + copy: + src: "{{ backup_dir }}/tmp/kubeadm-config.yml" + dest: "/etc/kubeadm/kubeadm-config.yml" + remote_src: yes + when: stat_result.stat.exists + +- name: Initialize the master with backup including kubeadm configuration file + shell: kubeadm init --ignore-preflight-errors=DirAvailable--var-lib-etcd,NumCPU --config /etc/kubeadm/kubeadm-config.yml + when: stat_result.stat.exists + +- name: Initialize the master with backup + shell: kubeadm init --ignore-preflight-errors=DirAvailable--var-lib-etcd,NumCPU + when: not stat_result.stat.exists + +- name: Wait for all nodes to be ready + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl get nodes -o json + register: output + until: output.stdout|from_json|json_query("items[*].status.conditions[?(@.type=='Ready')].status[]")|unique == ["True"] + retries: 120 + delay: 10 + +- name: Check cluster version + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl version --short | grep -i server + register: cluster_version + +# https://github.com/kubernetes/kubeadm/issues/1471 Upgrading a 1.12 cluster thru 1.13 to 1.14 fails + +- name: Validate whether current cluster is upgradeable (from ver. 
1.13) + + block: + - name: Show upgrade plan + shell: kubeadm upgrade plan + when: '"1.13" in cluster_version.stdout' + + rescue: + - name: Find the existing etcd server certificates + find: + paths: /etc/kubernetes/pki/etcd + patterns: "*server.*" + register: files_to_delete + + - name: Remove the existing etcd server certificates + file: + path: "{{ item.path }}" + state: absent + with_items: "{{ files_to_delete.files }}" + + - name: Regenerate the etcd server certificates + shell: kubeadm init phase certs etcd-server + +- name: Clean temporary directory + file: + state: absent + path: "{{ backup_dir }}/tmp/" diff --git a/core/core/src/ansible/roles/upgrade/defaults/main.yml b/core/core/src/ansible/roles/upgrade/defaults/main.yml new file mode 100644 index 0000000000..ed97d539c0 --- /dev/null +++ b/core/core/src/ansible/roles/upgrade/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/core/core/src/ansible/roles/upgrade/tasks/main.yml b/core/core/src/ansible/roles/upgrade/tasks/main.yml new file mode 100644 index 0000000000..b9e715422c --- /dev/null +++ b/core/core/src/ansible/roles/upgrade/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: Check cluster version + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl version --short | grep -i server + register: cluster_version + +- name: Check kubelet version + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl get node {{ inventory_hostname }} -o custom-columns=VERSION:.status.nodeInfo.kubeletVersion + register: kubelet_version + +- name: Upgrade master to {{ version }} + include_tasks: "upgrade_master.yml" + vars: + version: "{{ ver }}" + cni_version: "{{ cni_ver }}" + when: + - groups['master'][0] == inventory_hostname + - (version == "1.12.9" and "1.11" in cluster_version.stdout) or + (version == "1.13.7" and "1.12" in cluster_version.stdout) or + (version == "1.14.3" and "1.13" in cluster_version.stdout) + +- name: Upgrade nodes to {{ version }} + 
include_tasks: "upgrade_nodes.yml" + vars: + version: "{{ ver }}" + cni_version: "{{ cni_ver }}" + when: + - inventory_hostname in groups['worker'] + - (version == "1.12.9" and "1.11" in kubelet_version.stdout and kubelet_version.stdout != cluster_version.stdout) or + (version == "1.13.7" and "1.12" in kubelet_version.stdout and kubelet_version.stdout != cluster_version.stdout) or + (version == "1.14.3" and "1.13" in kubelet_version.stdout and kubelet_version.stdout != cluster_version.stdout ) diff --git a/core/core/src/ansible/roles/upgrade/tasks/upgrade_master.yml b/core/core/src/ansible/roles/upgrade/tasks/upgrade_master.yml new file mode 100644 index 0000000000..ba4e8c63f3 --- /dev/null +++ b/core/core/src/ansible/roles/upgrade/tasks/upgrade_master.yml @@ -0,0 +1,118 @@ +--- +- name: Wait for the cluster's readiness + include_tasks: "wait.yml" + +- name: Drain master in preparation for maintenance + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl drain {{ inventory_hostname }} --ignore-daemonsets --delete-local-data + +- name: Wait for the cluster's readiness + include_tasks: "wait.yml" + +- name: Get information about installed packages as facts + package_facts: + manager: "auto" + +- name: Remove newer Debian packages installed as dependencies if they exist # as there is no allow_downgrade parameter in ansible apt module + apt: + name: + - kubernetes-cni + - kubelet + - kubectl + - kubeadm + state: absent + when: + - ansible_os_family == "Debian" + - (ansible_facts.packages['kubernetes-cni'][0].version is version (cni_version + '-00', '>')) or + (ansible_facts.packages['kubelet'][0].version is version (version + '-00', '>')) or + (ansible_facts.packages['kubectl'][0].version is version (version + '-00', '>')) or + (ansible_facts.packages['kubeadm'][0].version is version (version + '-00', '>')) + +- name: Install kubernetes-cni {{ cni_version }}, kubelet {{ version }}, kubectl {{ version }} and kubeadm {{ version }} 
packages for RedHat family + yum: + name: + - kubernetes-cni-{{ cni_version }} + - kubelet-{{ version }} + - kubectl-{{ version }} + - kubeadm-{{ version }} + update_cache: yes + allow_downgrade: yes + state: present + when: ansible_os_family == "RedHat" + +- name: Install kubernetes-cni {{ cni_version }}, kubelet {{ version }}, kubectl {{ version }} and kubeadm {{ version }} packages for Debian family + apt: + name: + - kubernetes-cni={{ cni_version }}-00 + - kubelet={{ version }}-00 + - kubectl={{ version }}-00 + - kubeadm={{ version }}-00 + update_cache: yes + state: present + when: ansible_os_family == "Debian" + +- name: Wait for the cluster's readiness + include_tasks: "wait.yml" + +# https://github.com/kubernetes/kubeadm/issues/1471 Upgrading a 1.12 cluster thru 1.13 to 1.14 fails + +- name: Validate whether current cluster is upgradeable (from ver. 1.13) + + block: + - name: Show upgrade plan + shell: kubeadm upgrade plan v{{ version }} + when: '"1.13" in cluster_version.stdout' + + rescue: + - name: Find the existing etcd server certificates + find: + paths: /etc/kubernetes/pki/etcd + patterns: "*server.*" + register: files_to_delete + + - name: Remove the existing etcd server certificates + file: + path: "{{ item.path }}" + state: absent + with_items: "{{ files_to_delete.files }}" + + - name: Regenerate the etcd server certificates + shell: kubeadm init phase certs etcd-server + +- name: Validate whether current cluster is upgradeable + shell: kubeadm upgrade plan v{{ version }} + +- name: Upgrade Kubernetes cluster to the specified version v{{ version }} + shell: kubeadm upgrade apply -y v{{ version }} + retries: 5 + delay: 5 + register: output + until: output is succeeded + +- name: Wait for the cluster's readiness + include_tasks: "wait.yml" + +- name: Reload daemon + shell: systemctl daemon-reload + +- name: Restart kubelet + shell: systemctl restart kubelet + +- name: Check kubelet status + shell: systemctl status kubelet + +- name: Wait for the 
cluster's readiness + include_tasks: "wait.yml" + +- name: Uncordon master - mark master as schedulable + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl uncordon {{ inventory_hostname }} + retries: 5 + delay: 5 + register: output + until: output is succeeded + +- name: Verify cluster version + include_tasks: "verify.yml" diff --git a/core/core/src/ansible/roles/upgrade/tasks/upgrade_nodes.yml b/core/core/src/ansible/roles/upgrade/tasks/upgrade_nodes.yml new file mode 100644 index 0000000000..614a929de3 --- /dev/null +++ b/core/core/src/ansible/roles/upgrade/tasks/upgrade_nodes.yml @@ -0,0 +1,83 @@ +--- +- name: Wait for the cluster's readiness + include_tasks: "wait.yml" + +- name: Drain node in preparation for maintenance + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl drain {{ inventory_hostname }} --ignore-daemonsets --delete-local-data + delegate_to: "{{ groups['master'][0] }}" + +- name: Wait for the cluster's readiness + include_tasks: "wait.yml" + +- name: Get information about installed packages as facts + package_facts: + manager: "auto" + +- name: Remove newer Debian packages installed as dependencies if they exist # as there is no allow_downgrade parameter in ansible apt module + apt: + name: + - kubernetes-cni + - kubelet + - kubectl + - kubeadm + state: absent + when: + - ansible_os_family == "Debian" + - (ansible_facts.packages['kubernetes-cni'][0].version is version (cni_version + '-00', '>')) or + (ansible_facts.packages['kubelet'][0].version is version (version + '-00', '>')) or + (ansible_facts.packages['kubectl'][0].version is version (version + '-00', '>')) or + (ansible_facts.packages['kubeadm'][0].version is version (version + '-00', '>')) + +- name: Install kubernetes-cni {{ cni_version }}, kubelet {{ version }}, kubectl {{ version }} and kubeadm {{ version }} packages for RedHat family + yum: + name: + - kubernetes-cni-{{ cni_version }} + - kubelet-{{ version }} 
+ - kubectl-{{ version }} + - kubeadm-{{ version }} + update_cache: yes + allow_downgrade: yes + state: present + when: ansible_os_family == "RedHat" + +- name: Install kubernetes-cni {{ cni_version }}, kubelet {{ version }}, kubectl {{ version }} and kubeadm {{ version }} packages for Debian family + apt: + name: + - kubernetes-cni={{ cni_version }}-00 + - kubelet={{ version }}-00 + - kubectl={{ version }}-00 + - kubeadm={{ version }}-00 + update_cache: yes + state: present + when: ansible_os_family == "Debian" + +- name: Upgrade node config + shell: kubeadm upgrade node config --kubelet-version v{{ version }} + +- name: Reload daemon + shell: systemctl daemon-reload + +- name: Restart kubelet + shell: systemctl restart kubelet + +- name: Check kubelet status + shell: systemctl status kubelet + +- name: Wait for the cluster's readiness + include_tasks: "wait.yml" + +- name: Uncordon node - mark node as schedulable + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl uncordon {{ inventory_hostname }} + retries: 5 + delay: 5 + register: output + until: output is succeeded + delegate_to: "{{ groups['master'][0] }}" + +- name: Verify cluster version + include_tasks: "verify.yml" + diff --git a/core/core/src/ansible/roles/upgrade/tasks/verify.yml b/core/core/src/ansible/roles/upgrade/tasks/verify.yml new file mode 100644 index 0000000000..964f553439 --- /dev/null +++ b/core/core/src/ansible/roles/upgrade/tasks/verify.yml @@ -0,0 +1,42 @@ +--- +- name: Get cluster version + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl version --short | grep -i server + register: kubectl_cluster_version + +- name: Get kubectl version + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl version --client --short | awk '{print $3}' + register: kubectl_client_version + +- name: Get kubeadm version + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: 
kubeadm version -o short + register: kubeadm_version + +- name: Get node version + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl get nodes {{ inventory_hostname }} -o wide | awk '{print $2" "$5}' + register: get_node_status + +- name: Verify cluster version + assert: + that: "'{{ version }}' in kubectl_cluster_version.stdout" + +- name: Verify kubectl version + assert: + that: "'{{ version }}' in kubectl_client_version.stdout" + +- name: Verify kubeadm version + assert: + that: "'{{ version }}' in kubeadm_version.stdout" + +- name: Verify node version and status + assert: + that: + - "'{{ version }}' in get_node_status.stdout" + - "'Ready' in get_node_status.stdout" diff --git a/core/core/src/ansible/roles/upgrade/tasks/wait.yml b/core/core/src/ansible/roles/upgrade/tasks/wait.yml new file mode 100644 index 0000000000..3019f114a9 --- /dev/null +++ b/core/core/src/ansible/roles/upgrade/tasks/wait.yml @@ -0,0 +1,37 @@ +--- +- name: Wait for kubectl to find and access a Kubernetes cluster + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl cluster-info + retries: 10 + delay: 10 + register: output + until: output is succeeded and "running" in output.stdout + +- name: Wait for all nodes to be ready + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl get nodes -o json + register: output + until: output.stdout|from_json|json_query("items[*].status.conditions[?(@.type=='Ready')].status[]")|unique == ["True"] + retries: 120 + delay: 10 + +- name: Wait for all pods to be running + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl get pods --all-namespaces -o json + register: output + until: output.stdout|from_json|json_query('items[*].status.phase')|unique == ["Running"] + retries: 120 + delay: 10 + +- name: Wait for all pods to be ready + environment: + KUBECONFIG: "/home/{{ admin_user.name }}/.kube/config" + shell: kubectl 
get pods --all-namespaces -o json + register: output + until: output.stdout|from_json|json_query('items[*].status.conditions[].status')|unique == ["True"] + retries: 120 + delay: 10 + diff --git a/core/core/src/ansible/upgrade.yml b/core/core/src/ansible/upgrade.yml new file mode 100644 index 0000000000..8b11dbe44f --- /dev/null +++ b/core/core/src/ansible/upgrade.yml @@ -0,0 +1,31 @@ +--- +# Ansible playbook for upgrading Kubernetes cluster + +- hosts: master:worker + serial: 1 + become: true + become_method: sudo + roles: + - { role: upgrade, ver: "1.12.9", cni_ver: "0.7.5" } + +- hosts: master:worker + serial: 1 + become: true + become_method: sudo + roles: + - { role: upgrade, ver: "1.13.7", cni_ver: "0.7.5" } + +- hosts: master:worker + serial: 1 + become: true + become_method: sudo + roles: + - { role: upgrade, ver: "1.14.3", cni_ver: "0.7.5" } + + +# latest patch versions: +# 1.11.10 +# 1.12.9 +# 1.13.7 +# 1.14.3 + diff --git a/core/core/src/docker/test-CI/prepare_sp.sh b/core/core/src/docker/test-CI/prepare_sp.sh index 21a4f1a618..aefe2afffc 100644 --- a/core/core/src/docker/test-CI/prepare_sp.sh +++ b/core/core/src/docker/test-CI/prepare_sp.sh @@ -3,8 +3,8 @@ mkdir -p core/build/azure/infrastructure/$RESOURCE_GROUP echo '{ "appId": "{{ sp_client_id }}", - "displayName": "epiphany-vsts", - "name": "http://epiphany-vsts", + "displayName": "{{ sp_app_name }}", + "name": "http://{{ sp_app_name }}", "password": "{{ sp_client_secret }}", "tenant": "{{ sp_tenant_id }}" } @@ -46,13 +46,13 @@ core: enable: False subscription_id: {{ sp_subscription_id }} -app_name: epiphany-vsts +app_name: {{ sp_app_name }} app_id: {{ sp_client_id }} tenant_id: {{ sp_tenant_id }} role: Contributor auth: {{ sp_client_secret }} auth_type: pwd " >> tmp/sp/security.yaml -sed -i "s/{{ sp_subscription_id }}/$SP_SUBSCRIPTION_ID/g; s/{{ sp_client_id }}/$SP_CLIENT_ID/g; s/{{ sp_tenant_id }}/$SP_TENANT_ID/g; s/{{ sp_client_secret }}/$SP_CLIENT_SECRET/g" tmp/sp/* +sed -i "s/{{ 
sp_subscription_id }}/$SP_SUBSCRIPTION_ID/g; s/{{ sp_client_id }}/$SP_CLIENT_ID/g; s/{{ sp_tenant_id }}/$SP_TENANT_ID/g; s/{{ sp_client_secret }}/$SP_CLIENT_SECRET/g; s/{{ sp_app_name }}/$SP_APP_NAME/g" tmp/sp/* cp tmp/sp/* core/build/azure/infrastructure/$RESOURCE_GROUP rm -rf tmp \ No newline at end of file diff --git a/core/core/src/scripts/helpers/simple-log-collector.sh b/core/core/src/scripts/helpers/simple-log-collector.sh new file mode 100644 index 0000000000..a33276e28c --- /dev/null +++ b/core/core/src/scripts/helpers/simple-log-collector.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# +# Copyright 2019 ABB. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# USAGE: ./logs.sh /home/my_user/my_logs + +OUTPUT_LOCATION=$1 + +log_locations=("/var/log/audit/audit.log" "/var/log/auth.log" "/var/log/firewalld" "/var/log/haproxy.log" "/var/log/kafka/server.log" "/var/log/messages" "/var/log/secure" "/var/log/syslog") + +mkdir -p "$OUTPUT_LOCATION/$HOSTNAME" +printf "[CREATED]: Directory %s\n" "$OUTPUT_LOCATION/$HOSTNAME" + +for ix in ${!log_locations[*]} +do + + if [[ -f ${log_locations[$ix]} ]] + then + file_name="$(basename ${log_locations[$ix]})" + output_file_path="$OUTPUT_LOCATION/$HOSTNAME/$file_name" + cp "${log_locations[$ix]}" "$output_file_path" + printf "[COPIED]: %s to %s/%s \n" "${log_locations[$ix]}" "$output_file_path" + else + printf "[SKIPPED]: Not exists: %s\n" "${log_locations[$ix]}" + fi +done +echo + +if [[ -x "$(command -v docker)" ]]; then + containers=$(sudo docker ps -a --format '{{.Names}}') + for container in $containers + do + docker_logs_path="$OUTPUT_LOCATION/$HOSTNAME/$container" + sudo docker logs $container &> $docker_logs_path + printf "[CREATED]: Docker logs %s \n" "$docker_logs_path" + done +else + echo "[SKIPPED]: No docker found." 
+fi diff --git a/core/core/src/templates/common/manifest.yaml.j2 b/core/core/src/templates/common/manifest.yaml.j2 index eda707ec3b..e0b0d6d586 100644 --- a/core/core/src/templates/common/manifest.yaml.j2 +++ b/core/core/src/templates/common/manifest.yaml.j2 @@ -87,7 +87,7 @@ nodes: {%- endfor %} kubernetes: - version: 1.13.1 + version: 1.14.4 storage: enable: {{ core.kubernetes.storage.enable }} type: {{ core.kubernetes.storage.type }} @@ -107,6 +107,11 @@ kubernetes: {%- endif %} allow_pods_on_master: {{ core.kubernetes.allow_pods_on_master }} +{%- if core.kubernetes.cni_plugin is defined %} +kubernetes_specification: + cni_plugin: {{ core.kubernetes.cni_plugin }} +{%- endif %} + {%- if core.haproxy is defined %} haproxy: {% if core.haproxy.http_request_timeout is defined and core.haproxy.http_request_timeout is not none %} diff --git a/core/core/src/templates/common/proxy.sh.j2 b/core/core/src/templates/common/proxy.sh.j2 index da0dac23c9..397cf2aec6 100644 --- a/core/core/src/templates/common/proxy.sh.j2 +++ b/core/core/src/templates/common/proxy.sh.j2 @@ -15,12 +15,6 @@ # limitations under the License. # -# gen_docs.sh creates the doc subdirectory trees and then copies the required *.md files to the given folder overridding -# whatever is those doc directories. The VSTS Wiki allow for documentation from *.md files so restricting to only /docs -# folder allows for a more controlled environment. - -# NOTE: There needs to be templates created and data entered for the docs just like the other areas of Epiphany. - # Exit immediately if something goes wrong. 
set -eu diff --git a/core/core/test/serverspec/spec/haproxy_tls_termination/haproxy_tls_termination_spec.rb b/core/core/test/serverspec/spec/haproxy_tls_termination/haproxy_tls_termination_spec.rb index 09d423b6ce..ff5b3bb773 100644 --- a/core/core/test/serverspec/spec/haproxy_tls_termination/haproxy_tls_termination_spec.rb +++ b/core/core/test/serverspec/spec/haproxy_tls_termination/haproxy_tls_termination_spec.rb @@ -18,7 +18,7 @@ it { should exist } it { should belong_to_group 'haproxy' } it { should have_home_directory '/var/lib/haproxy' } - it { should have_login_shell '/usr/sbin/nologin' } + it { should have_login_shell('/usr/sbin/nologin').or have_login_shell('/sbin/nologin') } # HAProxy user shell is located in /sbin/nologin on RedHat end end diff --git a/core/core/test/serverspec/spec/master/master_spec.rb b/core/core/test/serverspec/spec/master/master_spec.rb index 35da1a257a..9b6539cf8e 100644 --- a/core/core/test/serverspec/spec/master/master_spec.rb +++ b/core/core/test/serverspec/spec/master/master_spec.rb @@ -144,7 +144,7 @@ end end describe 'Checking if the dashboard is available' do - describe command('for i in {1..60}; do if [ $(curl -o /dev/null -s -w "%{http_code}" "http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/") -eq 200 ]; \ + describe command('for i in {1..60}; do if [ $(curl -o /dev/null -s -w "%{http_code}" "http://localhost:8001/api/v1/namespaces/$(kubectl get deployments --all-namespaces --field-selector metadata.name=kubernetes-dashboard --no-headers | awk \'{print $1}\')/services/https:kubernetes-dashboard:/proxy/") -eq 200 ]; \ then echo -n "200"; break; else sleep 1; fi; done') do it "is expected to be equal" do expect(subject.stdout.to_i).to eq 200 diff --git a/core/data/azure/infrastructure/epiphany-bld-apps/data.yaml b/core/data/azure/infrastructure/epiphany-bld-apps/data.yaml index c13a7b7697..8e1037302a 100644 --- a/core/data/azure/infrastructure/epiphany-bld-apps/data.yaml +++ 
b/core/data/azure/infrastructure/epiphany-bld-apps/data.yaml @@ -8,7 +8,7 @@ title: Epiphany Apps Infrastructure... kind: datafile -version: 0.2.2 +version: 0.3.0 # NOTE: Any data values that are empty put "" or the value None will be used in the templates for those attributes. @@ -74,6 +74,7 @@ core: - kafka-exporter - postgresql - rabbitmq + - deployments # !Caution! # Disable this role if you don't want restart your servers - reboot @@ -457,9 +458,10 @@ core: roles: - linux - master + - deployments - node_exporter - filebeat - #- reboot + - reboot delete_os_disk_on_termination: false delete_data_disks_on_termination: false @@ -1152,7 +1154,7 @@ core: name: rabbitmq-cluster port: 30672 management_port: 31672 - replicas: 2 + replicas: 0 namespace: queue rabbitmq: # amqp_port: 5672 #optional - default 5672 @@ -1176,7 +1178,7 @@ core: service: name: auth-service port: 30104 - replicas: 2 + replicas: 0 namespace: auth-tools admin_user: admin admin_password: admin diff --git a/core/data/azure/infrastructure/epiphany-playground/basic-data.yaml b/core/data/azure/infrastructure/epiphany-playground/basic-data.yaml index 1bc4370751..b1dea06854 100644 --- a/core/data/azure/infrastructure/epiphany-playground/basic-data.yaml +++ b/core/data/azure/infrastructure/epiphany-playground/basic-data.yaml @@ -2,7 +2,7 @@ # Simplified datafile that you can use together with template (see README.md in this folder). # Change values according to your needs, start with generating ssh keys and placing them in the directory "keys_directory". Do not forget to update "keys_directory" as well. 
kind: simplified-datafile -version: 0.2.2 +version: 0.3.0 environment_name: Playground azure: subscription_name: YOUR-SUBSCRIPTION-NAME diff --git a/core/data/azure/infrastructure/epiphany-qa-basic/basic-data.yaml b/core/data/azure/infrastructure/epiphany-qa-basic/basic-data.yaml index 04ab7973aa..920f312b6a 100644 --- a/core/data/azure/infrastructure/epiphany-qa-basic/basic-data.yaml +++ b/core/data/azure/infrastructure/epiphany-qa-basic/basic-data.yaml @@ -1,7 +1,7 @@ --- # Simplified datafile that you can use together with QA template. kind: simplified-datafile -version: 0.2.2 +version: 0.3.0 environment_name: {{ resource_group }} azure: subscription_name: {{ sp_subscription_name }} @@ -30,3 +30,4 @@ config: rabbitmq_deployment: {{ rabbitmq_deployment }} auth_service_deployment: {{ auth_service_deployment }} alerts_for_monitoring: {{ alerts_for_monitoring }} + cni_plugin: {{ cni_plugin }} \ No newline at end of file diff --git a/core/data/azure/infrastructure/epiphany-qa-template/data.yaml.j2 b/core/data/azure/infrastructure/epiphany-qa-template/data.yaml.j2 index a1870087a7..d837d1a5ab 100644 --- a/core/data/azure/infrastructure/epiphany-qa-template/data.yaml.j2 +++ b/core/data/azure/infrastructure/epiphany-qa-template/data.yaml.j2 @@ -9,7 +9,7 @@ title: Epiphany ({{ azure.image_offer }}) {{ environment_name }} kind: datafile -version: 0.2.2 +version: 0.3.0 # NOTE: Any data values that are empty put "" or the value None will be used in the templates for those attributes. 
@@ -1176,6 +1176,7 @@ core: kubernetes: version: 1.13.1 + cni_plugin: {{ config.cni_plugin }} # image_registry_secrets: # - name: regcred # server_url: your-registry-url diff --git a/core/data/azure/infrastructure/epiphany-single-machine/data.yaml b/core/data/azure/infrastructure/epiphany-single-machine/data.yaml index e28af9e7da..39b2b886f1 100644 --- a/core/data/azure/infrastructure/epiphany-single-machine/data.yaml +++ b/core/data/azure/infrastructure/epiphany-single-machine/data.yaml @@ -8,7 +8,7 @@ title: Epiphany Single Machine Infrastructure... kind: datafile -version: 0.2.0 +version: 0.3.0 # NOTE: Any data values that are empty put "" or the value None will be used in the templates for those attributes. diff --git a/core/data/azure/infrastructure/epiphany-template/data.yaml.j2 b/core/data/azure/infrastructure/epiphany-template/data.yaml.j2 index 930cb82537..d8f8a9d767 100644 --- a/core/data/azure/infrastructure/epiphany-template/data.yaml.j2 +++ b/core/data/azure/infrastructure/epiphany-template/data.yaml.j2 @@ -9,7 +9,7 @@ title: Epiphany ({{ azure.image_offer }}) {{ environment_name }} kind: datafile -version: 0.2.2 +version: 0.3.0 # NOTE: Any data values that are empty put "" or the value None will be used in the templates for those attributes. 
diff --git a/core/data/metal/epiphany-lab/data.yaml b/core/data/metal/epiphany-lab/data.yaml index 05ac565859..120c691f41 100644 --- a/core/data/metal/epiphany-lab/data.yaml +++ b/core/data/metal/epiphany-lab/data.yaml @@ -4,7 +4,7 @@ kind: datafile -version: 1.0.1 +version: 0.3.0 # This will apply to a VPN like environment or an air-gapped like environment bastian: diff --git a/core/data/metal/epiphany-single-machine/data.yaml b/core/data/metal/epiphany-single-machine/data.yaml index 5095967493..271e69987c 100644 --- a/core/data/metal/epiphany-single-machine/data.yaml +++ b/core/data/metal/epiphany-single-machine/data.yaml @@ -4,7 +4,7 @@ kind: datafile -version: 0.2.0 +version: 0.3.0 # This will apply to a VPN like environment or an air-gapped like environment bastian: diff --git a/core/data/vmware/epiphany-lab/data.yaml b/core/data/vmware/epiphany-lab/data.yaml index 3bfed06b13..b0dd85de60 100644 --- a/core/data/vmware/epiphany-lab/data.yaml +++ b/core/data/vmware/epiphany-lab/data.yaml @@ -4,7 +4,7 @@ kind: datafile -version: 1.0.1 +version: 0.3.0 # This will apply to a VPN like environment or an air-gapped like environment bastian: diff --git a/core/epiphany_hdi_settings.json b/core/epiphany_hdi_settings.json deleted file mode 100644 index e7f620a9f2..0000000000 --- a/core/epiphany_hdi_settings.json +++ /dev/null @@ -1,38 +0,0 @@ -// workspace configuration template of HDInsight extension -{ - /* example: - "script_to_cluster": [{ - "clusterName": "hdi_cluster_1", - "filePath": "a.hql" - }, - { - "clusterName": "hdi_cluster_2", - "filePath": "src/b.py" - }] - */ - "script_to_cluster": [{ - - }], - /* more details from: https://github.com/cloudera/livy - examples: - "livy_conf": { - "driverMemory": "1G", - "driverCores": 2, - "executorMemory": "512M", - "executorCores": 10, - "numExecutors": 5 - } - */ - "livy_conf": { - - }, - /* examples: - "additional_conf": { - azure_environment: AzureChina // Only Azure or AzureChina works here - } - */ - - 
"additional_conf": { - - } -} \ No newline at end of file diff --git a/core/manifest.yaml b/core/manifest.yaml index d8eddc443f..dea33bf9f3 100644 --- a/core/manifest.yaml +++ b/core/manifest.yaml @@ -3,7 +3,7 @@ # This data is for the core of Epiphany and not the data for a given environment. name: Epiphany -version: 0.2.2 +version: 0.3.0 # Set the proxy info up if your environment requires it. This is sometimes the case for on-premise builds/installs proxy: diff --git a/core/proxy.sh b/core/proxy.sh index 7efb01434d..8c332b66d8 100755 --- a/core/proxy.sh +++ b/core/proxy.sh @@ -15,12 +15,6 @@ # limitations under the License. # -# gen_docs.sh creates the doc subdirectory trees and then copies the required *.md files to the given folder overridding -# whatever is those doc directories. The VSTS Wiki allow for documentation from *.md files so restricting to only /docs -# folder allows for a more controlled environment. - -# NOTE: There needs to be templates created and data entered for the docs just like the other areas of Epiphany. - # Exit immediately if something goes wrong. set -eu diff --git a/core/src/core/dummy.py b/core/src/core/dummy.py deleted file mode 100644 index bc562e704a..0000000000 --- a/core/src/core/dummy.py +++ /dev/null @@ -1,8 +0,0 @@ - - -def func(x): - return x + 3 - - -def test_answer(): - assert func(4) == 7 diff --git a/core/src/core/main.py b/core/src/core/main.py deleted file mode 100644 index 85ae1454e2..0000000000 --- a/core/src/core/main.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/python -# -# Copyright 2019 ABB. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -if __name__ == '__main__': - pass diff --git a/core/src/core/template_engine_3 b/core/src/core/template_engine_3 deleted file mode 100755 index e03b8f5322..0000000000 --- a/core/src/core/template_engine_3 +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/env python3 -# -# Author: Hans Chris Jones -# Copyright 2018, LambdaStack -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# NOTE: You need to pass in the full paths to the file references below. The data is file should be private and it's -# not in the project. The reason is you should create a YAML file that fits how you want to configure your -# environment. For eample, you can have a small YAML data file for configuring the kickstart/ISO process and then -# maybe one for building out the missing USER and/or SYSTEM data used in the CHEF Environment files. A sample -# environment file ships with the project for vagrant called vagrant.json. 
However, a production.json should -# really be a jinja2 template like base_environment.json.j2 with as much default data and with template {{ }} placeholders -# for the actual data. The output of this process should be the TRUE production.json file. Also, it's a good idea -# to name your production.json file more descriptive of the environment it actually belongs to. For example, -# prod-dc101.json or something like it. - -import argparse -import json -import sys -import traceback -import logging - -import yaml -from jinja2 import Environment, FileSystemLoader - -_SEPARATOR = '=' * 60 - - -# All three file paths must be full paths to each. -def render_template(data_file, in_file, out_file, json_arg, yaml_arg): - template_dict = {} - - logging.basicConfig(level=logging.INFO) - log = logging.getLogger(__name__) - - # If the -j flag was passed then convert the yaml to pretty json in sorted order - if json_arg: - with open(data_file) as data: - template_dict = yaml.load(data) - log.info(json.dumps(template_dict, indent=4, sort_keys=True)) - sys.exit(0) - - if yaml_arg: - with open(data_file) as data: - template_dict = json.load(data) - log.info(yaml.safe_dump(template_dict, indent=2, allow_unicode=True, default_flow_style=False)) - sys.exit(0) - - # Start the template processing - try: - # env = Environment(autoescape=False, loader=FileSystemLoader('/')), trim_blocks=True) - env = Environment(loader=FileSystemLoader('/')) - env.filters['jsonify'] = json.dumps - - with open(data_file) as data: - template_dict = yaml.load(data) - - # Render template and print generated config to console - template = env.get_template(in_file) - - with open(out_file, 'w') as f: - output = template.render(template_dict) - f.write(output) - - except Exception as e: - # Print out error, traceback and debug info... 
- log.error("Template Engine stopped due to the following error ===> ", e) - log.error(_SEPARATOR) - log.error('Debugging Output:') - log.error(traceback) - log.error(_SEPARATOR) - log.error('Data dictionary:') - log.error(json.dumps(template_dict, indent=4, sort_keys=True)) - log.error(_SEPARATOR) - log.error("Template Engine stopped due to the following error ===> ", e) - log.error('Scan up to see traceback and JSON data (traceback at both top and bottom of this output)') - log.error(_SEPARATOR) - log.error('Debugging Output:') - log.error(traceback) - log.error(_SEPARATOR) - sys.exit(1) - - -# Used to pass a string instead of input file as a template -# dict is json dictionary of the values to sub -def render_string(in_string, template_dict): - return Environment().from_string(in_string).render(template_dict) - - -# Standard way of calling... -# ./template_engine_3 -i