Merge pull request #130 from subhamkrai/fix-ns-arg-issue
namespace: namespace flags are not working
subhamkrai authored Jun 23, 2023
2 parents e43ea9b + f250e2e commit 519ca0b
Showing 4 changed files with 111 additions and 7 deletions.
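In short: when only `-n/--namespace` is given, the plugin now derives the operator namespace from the CephCluster namespace instead of hard-coding `rook-ceph` as the `--operator-namespace` default; a missing CephCluster namespace is now fatal rather than merely logged; `mons restore-quorum` scales the operator deployment in the operator namespace rather than the cluster namespace; and a new `custom-namespace` CI job runs the whole command suite with the operator in `test-operator` and the cluster in `test-cluster`.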
103 changes: 102 additions & 1 deletion .github/workflows/go-test.yaml
@@ -13,7 +13,7 @@ concurrency:
cancel-in-progress: true

jobs:
-  go-test:
+  default-namespace:
runs-on: ubuntu-20.04
steps:
- name: checkout
@@ -116,3 +116,104 @@ jobs:
- name: consider debugging
if: failure()
uses: mxschmitt/action-tmate@v3

custom-namespace:
runs-on: ubuntu-20.04
steps:
- name: checkout
uses: actions/checkout@v2
with:
fetch-depth: 0

- name: setup cluster
uses: ./.github/workflows/cluster-setup
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
op-ns: "test-operator"
cluster-ns: "test-cluster"

- name: build the binary and run unit tests
run: |
make build
sudo cp bin/kubectl-rook-ceph /usr/local/bin/kubectl-rook_ceph
make test
- name: Cluster Health
run: |
set -e
kubectl rook-ceph --operator-namespace test-operator -n test-cluster health
- name: Ceph status
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph status
- name: Mon restore
env:
ROOK_PLUGIN_SKIP_PROMPTS: true
run: |
set -ex
# test mon restore: restore quorum to mon a, expect mons b and c to be deleted, then mons d and e to be added
kubectl rook-ceph --operator-namespace test-operator -n test-cluster mons restore-quorum a
kubectl -n test-cluster wait pod -l app=rook-ceph-mon-b --for=delete --timeout=90s
kubectl -n test-cluster wait pod -l app=rook-ceph-mon-c --for=delete --timeout=90s
tests/github-action-helper.sh wait_for_three_mons test-cluster
kubectl -n test-cluster wait deployment rook-ceph-mon-d --for condition=Available=True --timeout=90s
kubectl -n test-cluster wait deployment rook-ceph-mon-e --for condition=Available=True --timeout=90s
- name: Rbd command
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rbd ls replicapool
- name: Get mon endpoints
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster mons
- name: Update operator configmap
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster operator set ROOK_LOG_LEVEL DEBUG
- name: Print cr status
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook version
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status all
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook status cephobjectstores
- name: Restart operator pod
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster operator restart
# wait for the operator pod to be restarted
POD=$(kubectl -n test-operator get pod -l app=rook-ceph-operator -o jsonpath="{.items[0].metadata.name}")
kubectl -n test-operator wait --for=delete pod/$POD --timeout=100s
tests/github-action-helper.sh wait_for_operator_pod_to_be_ready_state_custom
- name: Debug Mode
run: |
set -ex
kubectl rook-ceph --operator-namespace test-operator -n test-cluster debug start rook-ceph-osd-0
tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0-debug test-cluster
kubectl rook-ceph --operator-namespace test-operator -n test-cluster debug stop rook-ceph-osd-0
tests/github-action-helper.sh wait_for_deployment_to_be_running rook-ceph-osd-0 test-cluster
- name: Purge Osd
run: |
set -ex
kubectl -n test-cluster scale deployment rook-ceph-osd-0 --replicas 0
kubectl rook-ceph --operator-namespace test-operator -n test-cluster rook purge-osd 0 --force
- name: collect common logs
if: always()
uses: ./.github/workflows/collect-logs
with:
name: go-test

- name: consider debugging
if: failure()
uses: mxschmitt/action-tmate@v3
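The new `custom-namespace` job mirrors the existing `default-namespace` steps but passes `--operator-namespace test-operator -n test-cluster` on every invocation, so each subcommand is exercised with the operator and the CephCluster in different namespaces, which is exactly the case the flag handling below was getting wrong.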
7 changes: 5 additions & 2 deletions cmd/commands/root.go
@@ -45,6 +45,9 @@ var RootCmd = &cobra.Command{
Args: cobra.MinimumNArgs(1),
TraverseChildren: true,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
if CephClusterNamespace != "" && OperatorNamespace == "" {
OperatorNamespace = CephClusterNamespace
}
// logging.Info("CephCluster namespace: %q", CephClusterNamespace)
// logging.Info("Rook operator namespace: %q", OperatorNamespace)
},
@@ -61,7 +64,7 @@ func init() {
// Define your flags and configuration settings.

RootCmd.PersistentFlags().StringVar(&KubeConfig, "kubeconfig", "", "kubernetes config path")
-	RootCmd.PersistentFlags().StringVar(&OperatorNamespace, "operator-namespace", "rook-ceph", "Kubernetes namespace where rook operator is running")
+	RootCmd.PersistentFlags().StringVar(&OperatorNamespace, "operator-namespace", "", "Kubernetes namespace where rook operator is running")
RootCmd.PersistentFlags().StringVarP(&CephClusterNamespace, "namespace", "n", "rook-ceph", "Kubernetes namespace where CephCluster is created")
}

@@ -103,7 +106,7 @@ func PreValidationCheck(ctx context.Context, k8sclientset *k8sutil.Clientsets, o
}
_, err = k8sclientset.Kube.CoreV1().Namespaces().Get(ctx, cephClusterNamespace, v1.GetOptions{})
if err != nil {
-		logging.Error(fmt.Errorf("CephCluster namespace '%s' does not exist. %v", cephClusterNamespace, err))
+		logging.Fatal(fmt.Errorf("CephCluster namespace '%s' does not exist. %v", cephClusterNamespace, err))
}

rookVersionOutput := exec.RunCommandInOperatorPod(ctx, k8sclientset, "rook", []string{"version"}, operatorNamespace, cephClusterNamespace, true, false)
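Two changes land in cmd/commands/root.go: `--operator-namespace` loses its hard-coded `rook-ceph` default and instead falls back to the CephCluster namespace inside `PersistentPreRun`, and `PreValidationCheck` now aborts via `logging.Fatal` instead of logging and continuing when the CephCluster namespace does not exist. A minimal sketch of the fallback, assuming only the two package-level variables shown above (`resolveNamespaces` and `main` are hypothetical; the real logic runs in `PersistentPreRun`):

```go
package main

import "fmt"

// Flag targets, as in cmd/commands/root.go.
var (
	OperatorNamespace    string // --operator-namespace, default now ""
	CephClusterNamespace string // -n/--namespace, default "rook-ceph"
)

// resolveNamespaces mirrors the new PersistentPreRun logic: when only the
// cluster namespace is set, the operator namespace falls back to it.
func resolveNamespaces() {
	if CephClusterNamespace != "" && OperatorNamespace == "" {
		OperatorNamespace = CephClusterNamespace
	}
}

func main() {
	// Equivalent of `kubectl rook-ceph -n test-cluster ceph status`:
	CephClusterNamespace = "test-cluster"
	resolveNamespaces()
	fmt.Println(OperatorNamespace) // test-cluster

	// Passing both flags, as the custom-namespace CI job does:
	OperatorNamespace = "test-operator"
	resolveNamespaces() // an explicit value is left untouched
	fmt.Println(OperatorNamespace) // test-operator
}
```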
6 changes: 3 additions & 3 deletions pkg/debug/start_debug.go
@@ -109,17 +109,17 @@ func startDebug(ctx context.Context, k8sclientset kubernetes.Interface, clusterN
return nil
}

-func SetDeploymentScale(ctx context.Context, k8sclientset kubernetes.Interface, clusterNamespace, deploymentName string, scaleCount int) error {
+func SetDeploymentScale(ctx context.Context, k8sclientset kubernetes.Interface, namespace, deploymentName string, scaleCount int) error {
scale := &autoscalingv1.Scale{
ObjectMeta: v1.ObjectMeta{
Name: deploymentName,
-			Namespace: clusterNamespace,
+			Namespace: namespace,
},
Spec: autoscalingv1.ScaleSpec{
Replicas: int32(scaleCount),
},
}
-	_, err := k8sclientset.AppsV1().Deployments(clusterNamespace).UpdateScale(ctx, deploymentName, scale, v1.UpdateOptions{})
+	_, err := k8sclientset.AppsV1().Deployments(namespace).UpdateScale(ctx, deploymentName, scale, v1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update scale of deployment %s. %v\n", deploymentName, err)
}
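The rename from `clusterNamespace` to `namespace` changes no behavior in `start_debug.go` itself, but it makes the signature honest: `SetDeploymentScale` scales a deployment in whatever namespace it is given, which the `restore_quorum.go` fix below relies on. A hedged usage sketch (the helper name and namespace values are illustrative, and the import path assumes this repository's module name):

```go
package example

import (
	"context"

	"k8s.io/client-go/kubernetes"

	"github.com/rook/kubectl-rook-ceph/pkg/debug"
)

// cycleOperator is a hypothetical helper: after the rename, the same
// function serves deployments in the operator namespace and in the
// cluster namespace alike.
func cycleOperator(ctx context.Context, kube kubernetes.Interface) error {
	// The operator deployment lives in the operator namespace...
	if err := debug.SetDeploymentScale(ctx, kube, "test-operator", "rook-ceph-operator", 0); err != nil {
		return err
	}
	// ...while OSD deployments live in the CephCluster namespace.
	return debug.SetDeploymentScale(ctx, kube, "test-cluster", "rook-ceph-osd-0", 1)
}
```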
2 changes: 1 addition & 1 deletion pkg/mons/restore_quorum.go
Expand Up @@ -156,7 +156,7 @@ func restoreQuorum(ctx context.Context, clientsets *k8sutil.Clientsets, operator
}
logging.Info(output)

-	err = debug.SetDeploymentScale(ctx, clientsets.Kube, clusterNamespace, "rook-ceph-operator", 1)
+	err = debug.SetDeploymentScale(ctx, clientsets.Kube, operatorNamespace, "rook-ceph-operator", 1)
if err != nil {
return fmt.Errorf("failed to start deployment rook-ceph-operator. %v", err)
}
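This one-line change is the core fix: `restore-quorum` was scaling the `rook-ceph-operator` deployment in the CephCluster namespace, so with split namespaces the scale request targeted a namespace where no such deployment exists. Passing `operatorNamespace` matches where the operator actually runs, and the `custom-namespace` CI job above covers this path via `mons restore-quorum a`.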
