Feature/helm pr deployment demo #9690

Closed
wants to merge 11 commits
4 changes: 3 additions & 1 deletion .github/workflows/build.yml
@@ -138,7 +138,9 @@ jobs:
platforms: linux/amd64,linux/arm64
file: ./docker/artemis/Dockerfile
context: .
tags: ghcr.io/ls1intum/artemis:${{ steps.compute-tag.outputs.result }}
tags: |
ghcr.io/ls1intum/artemis:${{ steps.compute-tag.outputs.result }}
ghcr.io/ls1intum/artemis:${{ github.sha }}
push: true
cache-from: type=gha
cache-to: type=gha,mode=min
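This change additionally tags every image with the commit SHA, so the PR deployment workflow below can pin the exact image built for the head commit. A minimal, hypothetical check from a workstation (the image name comes from the diff above; the SHA is a placeholder):

# Pull the image for one specific commit rather than the moving branch tag.
# Replace <commit-sha> with the full SHA of the commit you want to inspect.
docker pull ghcr.io/ls1intum/artemis:<commit-sha>
# The deployment workflow below passes the same SHA to the chart via --set artemisVersion=${{ github.sha }}.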
100 changes: 100 additions & 0 deletions .github/workflows/helm_pr_deployment.yml
@@ -0,0 +1,100 @@
name: Deploy PR to Kubernetes

on:
pull_request:
types: [opened, synchronize, reopened, labeled]

jobs:
deploy:
if: ${{ github.event.pull_request.head.repo.full_name == 'ls1intum/Artemis' && contains(github.event.pull_request.labels.*.name, 'deploy:k8s') }}
runs-on: ubuntu-latest
environment:
name: k8s
url: https://pr${{ github.event.pull_request.number }}.artemis-k8s.ase.cit.tum.de
concurrency:
group: k8s-pr${{ github.event.pull_request.number }}
cancel-in-progress: true

steps:
- name: Checkout repository
uses: actions/checkout@v3

- name: Setup Helm
uses: azure/setup-helm@v3
with:
version: v3.15.0

- name: Setup Kubectl
uses: azure/setup-kubectl@v3
with:
version: v1.30.1

- name: Authenticate to Kubernetes cluster
run: |
mkdir -p $HOME/.kube
echo "${{ secrets.KUBECONFIG }}" > "$HOME/.kube/config"

- name: Get branch name
id: branch
run: echo "BRANCH_NAME=\"${GITHUB_REF#refs/heads/}\"" >> "$GITHUB_ENV"

- name: Determine tag
id: determine-tag
run: echo "SEM_VERSION=0.0.0-pr${{ github.event.pull_request.number }}" >> "$GITHUB_ENV"

- name: Update chart dependencies
run: helm dependency update helm/artemis

- name: Chart | Push
uses: appany/helm-oci-chart-releaser@v0.3.0
with:
name: artemis
repository: ${{ github.repository_owner }}/helm
tag: ${{ env.SEM_VERSION }}
path: helm/artemis
registry: ghcr.io
registry_username: ${{ github.repository_owner }}
registry_password: ${{ secrets.GITHUB_TOKEN }}
update_dependencies: 'true'

- name: Deploy Helm chart
run: |
helm upgrade --install artemis \
oci://ghcr.io/${{ github.repository_owner }}/helm/artemis --version ${{ env.SEM_VERSION }} \
--namespace artemis-pr${{ github.event.pull_request.number }} --create-namespace \
--set artemis.ingress.className="" \
--set artemis.ingress.annotations.cert-manager\\.io/cluster-issuer="letsencrypt-prod" \
--set artemis.ingress.annotations.spec\\.ingressClassName=nginx \
--set-string artemis.ingress.annotations.kubernetes\\.io/tls-acme="true" \
--set-string artemis.ingress.annotations.nginx\\.ingress\\.kubernetes\\.io/ssl-redirect="true" \
--set artemis.ingress.hosts[0].host=pr${{ github.event.pull_request.number }}.artemis-k8s.ase.cit.tum.de \
--set artemis.ingress.hosts[0].paths[0].path="/" \
--set artemis.ingress.hosts[0].paths[0].pathType=ImplementationSpecific \
--set artemis.ingress.tls[0].secretName=artemis-pr${{ github.event.pull_request.number }}-tls \
--set artemis.ingress.tls[0].hosts[0]=pr${{ github.event.pull_request.number }}.artemis-k8s.ase.cit.tum.de \
--set application.registry.jwt=${{ secrets.REGISTRY_JWT }} \
--set application.versioncontrol.url="https://pr${{ github.event.pull_request.number }}.artemis-k8s.ase.cit.tum.de" \
--set application.userManagement.internalAdmin.username=admin \
--set application.userManagement.internalAdmin.password=${{ secrets.ADMIN_PW }} \
--set application.operator.name="Technical University of Munich" \
--set application.operator.admin_name="Stephan Krusche" \
--set artemisVersion=${{ github.sha }} \
--timeout 15m \
--wait

- name: Post Deployment Comment
uses: actions/github-script@v6
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const prNumber = context.issue.number;

github.rest.issues.createComment({
...context.repo,
issue_number: prNumber,
body: `
:rocket: Deployed **PR #${prNumber}** to https://pr${prNumber}.artemis-k8s.ase.cit.tum.de

:hourglass_flowing_sand: It might take up to **10 minutes** to fully start up.
`
});
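For orientation, a rough sketch of how the artifacts produced by this job could be inspected from a workstation with cluster access (the registry path and namespace scheme follow the workflow above; the PR number 1234 is a placeholder):

# Pull the chart version the workflow pushed for a given PR.
helm pull oci://ghcr.io/ls1intum/helm/artemis --version 0.0.0-pr1234
# Check the release and the pods it created in the per-PR namespace.
helm status artemis --namespace artemis-pr1234
kubectl get pods --namespace artemis-pr1234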
40 changes: 40 additions & 0 deletions .github/workflows/helm_pr_deployment_delete.yml
@@ -0,0 +1,40 @@
name: Cleanup PR Deployment from Kubernetes

on:
pull_request:
types: [closed]

jobs:
destroy:
if: ${{ github.event.pull_request.head.repo.full_name == 'ls1intum/Artemis' && contains(github.event.pull_request.labels.*.name, 'deploy:k8s') }}
runs-on: ubuntu-latest
environment:
name: k8s
url: https://pr${{ github.event.pull_request.number }}.artemis-k8s.ase.cit.tum.de
concurrency:
group: k8s-pr${{ github.event.pull_request.number }}
cancel-in-progress: true

steps:
- name: Setup Helm
uses: azure/setup-helm@v3
with:
version: v3.15.0

- name: Setup Kubectl
uses: azure/setup-kubectl@v3
with:
version: v1.30.1

- name: Authenticate to Kubernetes cluster
run: |
mkdir -p $HOME/.kube
echo "${{ secrets.KUBECONFIG }}" > "$HOME/.kube/config"

- name: Delete Helm release
run: |
helm uninstall artemis --namespace artemis-pr${{ github.event.pull_request.number }}

- name: Delete Kubernetes namespace
run: |
kubectl delete namespace artemis-pr${{ github.event.pull_request.number }}
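If this job ever fails halfway (for example, the release is already gone but the namespace remains), the same cleanup can be done by hand — a sketch, with 1234 standing in for the PR number:

# Check whether the Helm release still exists before trying to uninstall it.
helm list --namespace artemis-pr1234
helm uninstall artemis --namespace artemis-pr1234 || true
# Remove the namespace and anything left inside it.
kubectl delete namespace artemis-pr1234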
19 changes: 19 additions & 0 deletions docs/dev/testservers.rst
@@ -53,6 +53,9 @@ Test Servers
| | - 1 Node | | | |
| | - LTI | | | |
+--------------------------------------------+-----------------------------+----------------+---------------------+----------------+
| https://pr*.artemis-k8s.ase.cit.tum.de | - Integrated Code Lifecycle | GitHub | - Admin | Always |
| | - MySQL | | | |
+--------------------------------------------+-----------------------------+----------------+---------------------+----------------+
| **Legacy Testservers**: See on confluence below |
+--------------------------------------------+-----------------------------+----------------+---------------------+----------------+

@@ -111,6 +114,22 @@ GitHub Deployment
.. figure:: testservers/github/remove-lock-label.png
:alt: GitHub Actions UI: Remove lock label

GitHub Deployment
-----------------

1. Label your PR with deploy:k8s to initialize a deployment to Kubernetes.

2. Once the server is deployed you get a message from GitHub Actions

.. figure:: testservers/github/k8s-deploy-complete.png
:alt: GitHub Actions Comment: successful deployment

2. Perform your testing

a) For logs, log in to Rancher: https://rancher.ase.cit.tum.de

3. As soon as you're done with this PR, close or merge it and the deployment will be destroyed. Don't remove the label!
Comment on lines +117 to +131
🛠️ Refactor suggestion

The documentation needs improvement for clarity and completeness.

The Kubernetes deployment instructions could be enhanced:

  1. Fix step numbering (step 2 appears twice)
  2. Add a screenshot showing where to find and apply the deploy:k8s label
  3. Include expected deployment duration
  4. Add a troubleshooting section
  5. Explain what to do if deployment fails

Consider this structure:

 GitHub Deployment
 -----------------

 1. Label your PR with deploy:k8s to initialize a deployment to Kubernetes.
+    .. figure:: testservers/github/k8s-label-location.png
+        :alt: GitHub UI: Location of deploy:k8s label
+
+    Note: Deployment typically takes 5-10 minutes.

 2. Once the server is deployed you get a message from GitHub Actions

     .. figure:: testservers/github/k8s-deploy-complete.png
         :alt: GitHub Actions Comment: successful deployment

-2. Perform your testing
+3. Perform your testing

     a) For logs, log in to Rancher: https://rancher.ase.cit.tum.de

-3. As soon as you're done with this PR, close or merge it and the deployment will be destroyed. Don't remove the label!
+4. As soon as you're done with this PR, close or merge it and the deployment will be destroyed. Don't remove the label!
+
+Troubleshooting
+---------------
+
+If deployment fails:
+1. Check the GitHub Actions logs for error messages
+2. Ensure your PR branch is up to date with the main branch
+3. Contact the DevOps team if issues persist

Committable suggestion skipped: line range outside the PR's diff.


Bamboo Deployment
-----------------
1. In your Pull Request on GitHub, scroll all the way down to the build status.
(binary file — content not displayed)
1 change: 1 addition & 0 deletions helm/.gitignore
@@ -0,0 +1 @@
charts
9 changes: 9 additions & 0 deletions helm/artemis/Chart.lock
@@ -0,0 +1,9 @@
dependencies:
- name: prometheus-adapter
repository: https://prometheus-community.github.io/helm-charts
version: 3.4.1
- name: prometheus
repository: https://prometheus-community.github.io/helm-charts
version: 15.18.0
digest: sha256:9eb272bb161e9a4bf9fff7ec31a7719664ff2dce11e28be0d94a83bded0cffa1
generated: "2024-08-10T17:33:10.801079972+02:00"
46 changes: 46 additions & 0 deletions helm/artemis/Chart.yaml
@@ -0,0 +1,46 @@
apiVersion: v2
name: artemis
description: A Helm chart for Artemis

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
#TODO: figure out how and when this is updated
version: 1.0.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "7.5.0"


sources:
- https://github.com/ls1intum/Artemis
- https://github.com/ls1intum/artemis-helm

maintainers:
- name: Matthias Linhuber
email: matthias.linhuber@tum.de

icon: "https://github.com/ls1intum/Artemis/blob/develop/src/main/resources/public/images/logo.png"

dependencies:
- name: prometheus-adapter
repository: https://prometheus-community.github.io/helm-charts
version: 3.4.1
condition: artemis.autoscaler.customPrometheus
- name: prometheus
repository: https://prometheus-community.github.io/helm-charts
version: 15.18.0
condition: artemis.autoscaler.customPrometheus
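Both Prometheus dependencies are gated behind artemis.autoscaler.customPrometheus, so a quick local sanity check of the chart might look like this (a sketch; the --set path mirrors the conditions above):

# Fetch the declared dependencies into helm/artemis/charts (that directory is ignored via helm/.gitignore).
helm dependency update helm/artemis
# Lint the chart with and without the optional Prometheus stack enabled.
helm lint helm/artemis
helm lint helm/artemis --set artemis.autoscaler.customPrometheus=true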
22 changes: 22 additions & 0 deletions helm/artemis/templates/NOTES.txt
@@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.artemis.ingress.enabled }}
{{- range $host := .Values.artemis.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.artemis.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.artemis.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "artemis.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.artemis.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "artemis.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "artemis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.artemis.service.port }}
Comment on lines +12 to +16
🛠️ Refactor suggestion

Enhance cloud provider compatibility for LoadBalancer IP retrieval

The current implementation might not work correctly with all cloud providers, as some return a hostname instead of an IP address in the LoadBalancer ingress field.

  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "artemis.fullname" . }}'
- export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "artemis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "artemis.fullname" . }} --template "{{"{{ with index .status.loadBalancer.ingress 0 }}{{or .ip .hostname}}{{end}}")
  echo http://$SERVICE_IP:{{ .Values.artemis.service.port }}
+ echo "NOTE: Some cloud providers may return a hostname instead of an IP address"

{{- else if contains "ClusterIP" .Values.artemis.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "artemis.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
Comment on lines +17 to +21
🛠️ Refactor suggestion

Improve ClusterIP configuration robustness

Two improvements are suggested:

  1. The local port (8080) should be configurable to avoid conflicts
  2. Pod selection should handle multiple replicas gracefully
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "artemis.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
- echo "Visit http://127.0.0.1:8080 to use your application"
- kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+ export LOCAL_PORT={{ .Values.artemis.service.localPort | default "8080" }}
+ echo "Visit http://127.0.0.1:${LOCAL_PORT} to use your application"
+ echo "NOTE: If the application has multiple replicas, you might need to specify a different pod"
+ kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME ${LOCAL_PORT}:$CONTAINER_PORT

Committable suggestion skipped: line range outside the PR's diff.


{{- end }}
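Once a release is installed, Helm can print these rendered notes back without re-templating the chart — a minimal example, with a hypothetical PR namespace:

# Show the rendered NOTES.txt for the installed release.
helm get notes artemis --namespace artemis-pr1234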
82 changes: 82 additions & 0 deletions helm/artemis/templates/_helpers.tpl
@@ -0,0 +1,82 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "artemis.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "artemis.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "artemis.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "artemis.labels" -}}
helm.sh/chart: {{ include "artemis.chart" . }}
{{ include "artemis.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "artemis.selectorLabels" -}}
app.kubernetes.io/name: {{ include "artemis.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "artemis.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "artemis.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}


{{/*
Generate common Artemis java spring profiles
*/}}
{{- define "artemis.springprofiles" -}}
prod,artemis,decoupling
{{- if .Values.application.userManagement.provider -}}
,{{ .Values.application.userManagement.provider }}
{{- end }}
{{- if .Values.application.userManagement.ldap.enabled -}}
,ldap
{{- end }}
{{- if .Values.application.versioncontrol.provider -}}
,{{ .Values.application.versioncontrol.provider }}
{{- end }}
{{- if .Values.application.continuousintegration.provider -}}
,{{ .Values.application.continuousintegration.provider }}
{{- end }}
{{- end }}
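The artemis.springprofiles helper builds the Spring profile list from the values file, so rendering the chart locally is an easy way to confirm what ends up in the deployment. A sketch — the provider values are illustrative placeholders, and where the profile string surfaces in the rendered manifests may differ:

# Render the templates with a few providers set and search for the computed profile list.
helm template artemis helm/artemis \
  --set application.userManagement.provider=jira \
  --set application.versioncontrol.provider=localvc \
  --set application.continuousintegration.provider=localci \
  | grep "prod,artemis,decoupling"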
41 changes: 41 additions & 0 deletions helm/artemis/templates/autoscaler/horizontal-pod-autoscaler.yml
@@ -0,0 +1,41 @@
{{- if .Values.artemis.autoscaler.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "artemis.fullname" . }}
labels:
{{- include "artemis.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "artemis.fullname" . }}
minReplicas: {{ .Values.artemis.autoscaler.minReplicas }}
maxReplicas: {{ .Values.artemis.autoscaler.maxReplicas }}
{{- with .Values.artemis.autoscaler.behavior }}
behavior:
{{- toYaml . | nindent 8 }}
{{- end }}
metrics:
{{- if .Values.artemis.autoscaler.metrics.usersPerInstance }}
- type: Pods
pods:
metric:
name: artemis_instance_websocket_users
target:
type: AverageValue
averageValue: {{ .Values.artemis.autoscaler.metrics.usersPerInstance }}
{{- end }}
{{- range .Values.artemis.autoscaler.metrics.externalMetrics }}
- type: External
external:
metric:
name: {{ .name }}
selector:
matchLabels:
{{- toYaml .labels | nindent 12 }}
target:
type: AverageValue
averageValue: {{ .targetValue }}
{{- end }}
{{- end }}
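The value paths consumed by this template can be set at install time; enabling the autoscaler might look like the following (a sketch — the thresholds are arbitrary examples, only the value paths come from the template above):

# Hypothetical flags enabling the HorizontalPodAutoscaler defined above.
helm upgrade --install artemis helm/artemis \
  --set artemis.autoscaler.enabled=true \
  --set artemis.autoscaler.minReplicas=1 \
  --set artemis.autoscaler.maxReplicas=4 \
  --set artemis.autoscaler.metrics.usersPerInstance=100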