From f93fcc6499f0ec98410b3efb6330a61bfd143683 Mon Sep 17 00:00:00 2001
From: Aylei
Date: Wed, 8 Apr 2020 21:59:09 +0800
Subject: [PATCH] Remove unnecessary informer caches (#1504)

---
 cmd/controller-manager/main.go                | 20 +++++++------------
 pkg/backup/backup/backup_cleaner.go           | 10 +++++-----
 pkg/backup/backup/backup_manager.go           | 15 +++++++-------
 pkg/backup/restore/restore_manager.go         | 15 +++++++-------
 pkg/backup/util/util.go                       | 13 ++++++------
 pkg/controller/backup/backup_controller.go    |  5 ++---
 pkg/controller/configmap_control.go           |  8 +-------
 pkg/controller/configmap_control_test.go      | 18 ++++++-----------
 pkg/controller/restore/restore_controller.go  |  3 +--
 pkg/controller/secret_control.go              | 14 ++++---------
 .../tidbcluster/tidb_cluster_controller.go    |  8 ++------
 pkg/manager/member/pd_member_manager_test.go  |  3 +--
 pkg/manager/member/pump_member_manager.go     |  3 ---
 .../member/pump_member_manager_test.go        |  4 +---
 pkg/manager/member/tidb_member_manager.go     |  2 --
 .../member/tidb_member_manager_test.go        |  2 +-
 16 files changed, 54 insertions(+), 89 deletions(-)

diff --git a/cmd/controller-manager/main.go b/cmd/controller-manager/main.go
index 8144f2d73a..aca09196a9 100644
--- a/cmd/controller-manager/main.go
+++ b/cmd/controller-manager/main.go
@@ -148,20 +148,14 @@ func main() {
 	var informerFactory informers.SharedInformerFactory
 	var kubeInformerFactory kubeinformers.SharedInformerFactory
-	if controller.ClusterScoped {
-		informerFactory = informers.NewSharedInformerFactory(cli, controller.ResyncDuration)
-		kubeInformerFactory = kubeinformers.NewSharedInformerFactory(kubeCli, controller.ResyncDuration)
-	} else {
-		options := []informers.SharedInformerOption{
-			informers.WithNamespace(ns),
-		}
-		informerFactory = informers.NewSharedInformerFactoryWithOptions(cli, controller.ResyncDuration, options...)
-
-		kubeoptions := []kubeinformers.SharedInformerOption{
-			kubeinformers.WithNamespace(ns),
-		}
-		kubeInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(kubeCli, controller.ResyncDuration, kubeoptions...)
+	var options []informers.SharedInformerOption
+	var kubeoptions []kubeinformers.SharedInformerOption
+	if !controller.ClusterScoped {
+		options = append(options, informers.WithNamespace(ns))
+		kubeoptions = append(kubeoptions, kubeinformers.WithNamespace(ns))
 	}
+	informerFactory = informers.NewSharedInformerFactoryWithOptions(cli, controller.ResyncDuration, options...)
+	kubeInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(kubeCli, controller.ResyncDuration, kubeoptions...)
 
 	rl := resourcelock.EndpointsLock{
 		EndpointsMeta: metav1.ObjectMeta{
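The main.go hunk above folds the cluster-scoped and namespace-scoped branches into one construction path: an empty option slice already means "watch every namespace", so only the namespaced case appends an option before the single NewSharedInformerFactoryWithOptions call. Below is a minimal, self-contained sketch of that same pattern; the fake clientset, the 30-second resync, and treating an empty namespace as cluster-scoped are assumptions of this example, not part of the patch.

package main

import (
	"fmt"
	"time"

	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// buildFactory mirrors the consolidated branch above: with no options the
// factory watches all namespaces; appending WithNamespace restricts it.
func buildFactory(kubeCli kubernetes.Interface, ns string) kubeinformers.SharedInformerFactory {
	var kubeoptions []kubeinformers.SharedInformerOption
	if ns != "" { // "" stands in for controller.ClusterScoped here
		kubeoptions = append(kubeoptions, kubeinformers.WithNamespace(ns))
	}
	return kubeinformers.NewSharedInformerFactoryWithOptions(kubeCli, 30*time.Second, kubeoptions...)
}

func main() {
	factory := buildFactory(fake.NewSimpleClientset(), "tidb-admin")
	fmt.Printf("constructed factory: %T\n", factory)
}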
diff --git a/pkg/backup/backup/backup_cleaner.go b/pkg/backup/backup/backup_cleaner.go
index 3f29ab65f6..81df6426e6 100644
--- a/pkg/backup/backup/backup_cleaner.go
+++ b/pkg/backup/backup/backup_cleaner.go
@@ -24,8 +24,8 @@ import (
 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
 	batchlisters "k8s.io/client-go/listers/batch/v1"
-	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/klog"
 )
 
@@ -36,7 +36,7 @@ type BackupCleaner interface {
 
 type backupCleaner struct {
 	statusUpdater controller.BackupConditionUpdaterInterface
-	secretLister  corelisters.SecretLister
+	kubeCli       kubernetes.Interface
 	jobLister     batchlisters.JobLister
 	jobControl    controller.JobControlInterface
 }
@@ -44,12 +44,12 @@ type backupCleaner struct {
 // NewBackupCleaner returns a BackupCleaner
 func NewBackupCleaner(
 	statusUpdater controller.BackupConditionUpdaterInterface,
-	secretLister corelisters.SecretLister,
+	kubeCli kubernetes.Interface,
 	jobLister batchlisters.JobLister,
 	jobControl controller.JobControlInterface) BackupCleaner {
 	return &backupCleaner{
 		statusUpdater,
-		secretLister,
+		kubeCli,
 		jobLister,
 		jobControl,
 	}
@@ -112,7 +112,7 @@ func (bc *backupCleaner) makeCleanJob(backup *v1alpha1.Backup) (*batchv1.Job, st
 	ns := backup.GetNamespace()
 	name := backup.GetName()
 
-	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bc.secretLister)
+	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bc.kubeCli)
 	if err != nil {
 		return nil, reason, err
 	}
diff --git a/pkg/backup/backup/backup_manager.go b/pkg/backup/backup/backup_manager.go
index 4c1eff1099..f692d17225 100644
--- a/pkg/backup/backup/backup_manager.go
+++ b/pkg/backup/backup/backup_manager.go
@@ -29,6 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
 	batchlisters "k8s.io/client-go/listers/batch/v1"
 	corelisters "k8s.io/client-go/listers/core/v1"
 )
@@ -36,7 +37,7 @@
 type backupManager struct {
 	backupCleaner BackupCleaner
 	statusUpdater controller.BackupConditionUpdaterInterface
-	secretLister  corelisters.SecretLister
+	kubeCli       kubernetes.Interface
 	jobLister     batchlisters.JobLister
 	jobControl    controller.JobControlInterface
 	pvcLister     corelisters.PersistentVolumeClaimLister
@@ -48,7 +49,7 @@
 func NewBackupManager(
 	backupCleaner BackupCleaner,
 	statusUpdater controller.BackupConditionUpdaterInterface,
-	secretLister corelisters.SecretLister,
+	kubeCli kubernetes.Interface,
 	jobLister batchlisters.JobLister,
 	jobControl controller.JobControlInterface,
 	pvcLister corelisters.PersistentVolumeClaimLister,
@@ -58,7 +59,7 @@
 	return &backupManager{
 		backupCleaner,
 		statusUpdater,
-		secretLister,
+		kubeCli,
 		jobLister,
 		jobControl,
 		pvcLister,
@@ -168,12 +169,12 @@ func (bm *backupManager) makeExportJob(backup *v1alpha1.Backup) (*batchv1.Job, s
 	ns := backup.GetNamespace()
 	name := backup.GetName()
 
-	envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.secretLister)
+	envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.kubeCli)
 	if err != nil {
 		return nil, reason, err
 	}
 
-	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bm.secretLister)
+	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bm.kubeCli)
 	if err != nil {
 		return nil, reason, fmt.Errorf("backup %s/%s, %v", ns, name, err)
 	}
@@ -268,12 +269,12 @@ func (bm *backupManager) makeBackupJob(backup *v1alpha1.Backup) (*batchv1.Job, s
 		return nil, fmt.Sprintf("failed to fetch tidbcluster %s/%s", backupNamespace, backup.Spec.BR.Cluster), err
 	}
 
-	envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.secretLister)
+	envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.kubeCli)
 	if err != nil {
 		return nil, reason, err
 	}
 
-	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bm.secretLister)
+	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bm.kubeCli)
 	if err != nil {
 		return nil, reason, fmt.Errorf("backup %s/%s, %v", ns, name, err)
 	}
diff --git a/pkg/backup/restore/restore_manager.go b/pkg/backup/restore/restore_manager.go
index ec8e63464d..5f0d971186 100644
--- a/pkg/backup/restore/restore_manager.go
+++ b/pkg/backup/restore/restore_manager.go
@@ -30,6 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
 	batchlisters "k8s.io/client-go/listers/batch/v1"
 	corelisters "k8s.io/client-go/listers/core/v1"
 )
@@ -37,7 +38,7 @@
 type restoreManager struct {
 	backupLister  listers.BackupLister
 	statusUpdater controller.RestoreConditionUpdaterInterface
-	secretLister  corelisters.SecretLister
+	kubeCli       kubernetes.Interface
 	jobLister     batchlisters.JobLister
 	jobControl    controller.JobControlInterface
 	pvcLister     corelisters.PersistentVolumeClaimLister
@@ -49,7 +50,7 @@
 func NewRestoreManager(
 	backupLister listers.BackupLister,
 	statusUpdater controller.RestoreConditionUpdaterInterface,
-	secretLister corelisters.SecretLister,
+	kubeCli kubernetes.Interface,
 	jobLister batchlisters.JobLister,
 	jobControl controller.JobControlInterface,
 	pvcLister corelisters.PersistentVolumeClaimLister,
@@ -59,7 +60,7 @@
 	return &restoreManager{
 		backupLister,
 		statusUpdater,
-		secretLister,
+		kubeCli,
 		jobLister,
 		jobControl,
 		pvcLister,
@@ -159,12 +160,12 @@ func (rm *restoreManager) makeImportJob(restore *v1alpha1.Restore) (*batchv1.Job
 	ns := restore.GetNamespace()
 	name := restore.GetName()
 
-	envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.secretLister)
+	envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.kubeCli)
 	if err != nil {
 		return nil, reason, err
 	}
 
-	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.UseKMS, restore.Spec.StorageProvider, rm.secretLister)
+	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.UseKMS, restore.Spec.StorageProvider, rm.kubeCli)
 	if err != nil {
 		return nil, reason, fmt.Errorf("restore %s/%s, %v", ns, name, err)
 	}
@@ -253,12 +254,12 @@ func (rm *restoreManager) makeRestoreJob(restore *v1alpha1.Restore) (*batchv1.Jo
 		return nil, fmt.Sprintf("failed to fetch tidbcluster %s/%s", restoreNamespace, restore.Spec.BR.Cluster), err
 	}
 
-	envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.secretLister)
+	envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.kubeCli)
 	if err != nil {
 		return nil, reason, err
 	}
 
-	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.UseKMS, restore.Spec.StorageProvider, rm.secretLister)
+	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.UseKMS, restore.Spec.StorageProvider, rm.kubeCli)
 	if err != nil {
 		return nil, reason, fmt.Errorf("restore %s/%s, %v", ns, name, err)
 	}
diff --git a/pkg/backup/util/util.go b/pkg/backup/util/util.go
index 066609960b..05a8ec1fbe 100644
--- a/pkg/backup/util/util.go
+++ b/pkg/backup/util/util.go
@@ -22,7 +22,8 @@ import (
 	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
 	"github.com/pingcap/tidb-operator/pkg/backup/constants"
 	corev1 "k8s.io/api/core/v1"
-	corelisters "k8s.io/client-go/listers/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
 )
 
 // CheckAllKeysExistInSecret check if all keys are included in the specific secret
@@ -160,7 +161,7 @@ func GenerateGcsCertEnvVar(gcs *v1alpha1.GcsStorageProvider) ([]corev1.EnvVar, s
 }
 
 // GenerateStorageCertEnv generate the env info in order to access backend backup storage
-func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StorageProvider, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) {
+func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StorageProvider, kubeCli kubernetes.Interface) ([]corev1.EnvVar, string, error) {
 	var certEnv []corev1.EnvVar
 	var reason string
 	var err error
@@ -174,7 +175,7 @@ func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StoragePro
 		s3SecretName := provider.S3.SecretName
 		if s3SecretName != "" {
-			secret, err := secretLister.Secrets(ns).Get(s3SecretName)
+			secret, err := kubeCli.CoreV1().Secrets(ns).Get(s3SecretName, metav1.GetOptions{})
 			if err != nil {
 				err := fmt.Errorf("get s3 secret %s/%s failed, err: %v", ns, s3SecretName, err)
 				return certEnv, "GetS3SecretFailed", err
@@ -196,7 +197,7 @@ func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StoragePro
 			return certEnv, "GcsConfigIsEmpty", errors.New("gcs config is empty")
 		}
 		gcsSecretName := provider.Gcs.SecretName
-		secret, err := secretLister.Secrets(ns).Get(gcsSecretName)
+		secret, err := kubeCli.CoreV1().Secrets(ns).Get(gcsSecretName, metav1.GetOptions{})
 		if err != nil {
 			err := fmt.Errorf("get gcs secret %s/%s failed, err: %v", ns, gcsSecretName, err)
 			return certEnv, "GetGcsSecretFailed", err
@@ -221,10 +222,10 @@ func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StoragePro
 }
 
 // GenerateTidbPasswordEnv generate the password EnvVar
-func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, useKMS bool, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) {
+func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, useKMS bool, kubeCli kubernetes.Interface) ([]corev1.EnvVar, string, error) {
 	var certEnv []corev1.EnvVar
 	var passwordKey string
-	secret, err := secretLister.Secrets(ns).Get(tidbSecretName)
+	secret, err := kubeCli.CoreV1().Secrets(ns).Get(tidbSecretName, metav1.GetOptions{})
 	if err != nil {
 		err = fmt.Errorf("backup %s/%s get tidb secret %s failed, err: %v", ns, name, tidbSecretName, err)
 		return certEnv, "GetTidbSecretFailed", err
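Each call site above swaps a cache-backed secretLister.Secrets(ns).Get(name) for kubeCli.CoreV1().Secrets(ns).Get(name, metav1.GetOptions{}): the read goes straight to the API server, so the backup and restore controllers no longer need a Secret informer kept in sync just to render Job environment variables. A minimal sketch of the replacement pattern against a fake clientset follows; the helper name, namespace, and demo Secret are illustrative only, and the context-free Get signature matches this client-go generation (newer releases add a context argument).

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// lookupPassword reads a Secret directly through the clientset, the same
// shape GenerateTidbPasswordEnv now uses instead of a lister.
func lookupPassword(kubeCli kubernetes.Interface, ns, name string) (string, error) {
	secret, err := kubeCli.CoreV1().Secrets(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("get secret %s/%s failed, err: %v", ns, name, err)
	}
	return string(secret.Data["password"]), nil
}

func main() {
	kubeCli := fake.NewSimpleClientset(&corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: "demo", Name: "tidb-secret"},
		Data:       map[string][]byte{"password": []byte("s3cr3t")},
	})
	fmt.Println(lookupPassword(kubeCli, "demo", "tidb-secret"))
}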
"GetTidbSecretFailed", err diff --git a/pkg/controller/backup/backup_controller.go b/pkg/controller/backup/backup_controller.go index 6418f931c3..e660649581 100644 --- a/pkg/controller/backup/backup_controller.go +++ b/pkg/controller/backup/backup_controller.go @@ -71,11 +71,10 @@ func NewController( tcInformer := informerFactory.Pingcap().V1alpha1().TidbClusters() jobInformer := kubeInformerFactory.Batch().V1().Jobs() pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims() - secretInformer := kubeInformerFactory.Core().V1().Secrets() statusUpdater := controller.NewRealBackupConditionUpdater(cli, backupInformer.Lister(), recorder) jobControl := controller.NewRealJobControl(kubeCli, recorder) pvcControl := controller.NewRealGeneralPVCControl(kubeCli, recorder) - backupCleaner := backup.NewBackupCleaner(statusUpdater, secretInformer.Lister(), jobInformer.Lister(), jobControl) + backupCleaner := backup.NewBackupCleaner(statusUpdater, kubeCli, jobInformer.Lister(), jobControl) bkc := &Controller{ kubeClient: kubeCli, @@ -85,7 +84,7 @@ func NewController( backup.NewBackupManager( backupCleaner, statusUpdater, - secretInformer.Lister(), + kubeCli, jobInformer.Lister(), jobControl, pvcInformer.Lister(), diff --git a/pkg/controller/configmap_control.go b/pkg/controller/configmap_control.go index 6e05755b58..137e227b1a 100644 --- a/pkg/controller/configmap_control.go +++ b/pkg/controller/configmap_control.go @@ -23,7 +23,6 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" coreinformers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" - corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" @@ -44,19 +43,16 @@ type ConfigMapControlInterface interface { type realConfigMapControl struct { client client.Client kubeCli kubernetes.Interface - cmLister corelisters.ConfigMapLister recorder record.EventRecorder } // NewRealSecretControl creates a new SecretControlInterface func NewRealConfigMapControl( kubeCli kubernetes.Interface, - cmLister corelisters.ConfigMapLister, recorder record.EventRecorder, ) ConfigMapControlInterface { return &realConfigMapControl{ kubeCli: kubeCli, - cmLister: cmLister, recorder: recorder, } } @@ -81,7 +77,7 @@ func (cc *realConfigMapControl) UpdateConfigMap(owner runtime.Object, cm *corev1 return nil } - if updated, err := cc.cmLister.ConfigMaps(cm.Namespace).Get(cmName); err != nil { + if updated, err := cc.kubeCli.CoreV1().ConfigMaps(cm.Namespace).Get(cmName, metav1.GetOptions{}); err != nil { utilruntime.HandleError(fmt.Errorf("error getting updated ConfigMap %s/%s from lister: %v", ns, cmName, err)) } else { cm = updated.DeepCopy() @@ -124,7 +120,6 @@ var _ ConfigMapControlInterface = &realConfigMapControl{} // NewFakeConfigMapControl returns a FakeConfigMapControl func NewFakeConfigMapControl(cmInformer coreinformers.ConfigMapInformer) *FakeConfigMapControl { return &FakeConfigMapControl{ - cmInformer.Lister(), cmInformer.Informer().GetIndexer(), RequestTracker{}, RequestTracker{}, @@ -134,7 +129,6 @@ func NewFakeConfigMapControl(cmInformer coreinformers.ConfigMapInformer) *FakeCo // FakeConfigMapControl is a fake ConfigMapControlInterface type FakeConfigMapControl struct { - CmLister corelisters.ConfigMapLister CmIndexer cache.Indexer createConfigMapTracker RequestTracker updateConfigMapTracker RequestTracker diff --git a/pkg/controller/configmap_control_test.go b/pkg/controller/configmap_control_test.go index ffdfb8ad88..47fb0ee7b2 100644 
--- a/pkg/controller/configmap_control_test.go
+++ b/pkg/controller/configmap_control_test.go
@@ -23,9 +23,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes/fake"
-	corelisters "k8s.io/client-go/listers/core/v1"
 	core "k8s.io/client-go/testing"
-	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 )
 
@@ -35,7 +33,7 @@ func TestConfigMapControlCreatesConfigMaps(t *testing.T) {
 	tc := newTidbCluster()
 	cm := newConfigMap()
 	fakeClient := &fake.Clientset{}
-	control := NewRealConfigMapControl(fakeClient, nil, recorder)
+	control := NewRealConfigMapControl(fakeClient, recorder)
 	fakeClient.AddReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
 		create := action.(core.CreateAction)
 		return true, create.GetObject(), nil
@@ -54,7 +52,7 @@ func TestConfigMapControlCreatesConfigMapFailed(t *testing.T) {
 	tc := newTidbCluster()
 	cm := newConfigMap()
 	fakeClient := &fake.Clientset{}
-	control := NewRealConfigMapControl(fakeClient, nil, recorder)
+	control := NewRealConfigMapControl(fakeClient, recorder)
 	fakeClient.AddReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
 		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
 	})
@@ -73,7 +71,7 @@ func TestConfigMapControlUpdateConfigMap(t *testing.T) {
 	cm := newConfigMap()
 	cm.Data["file"] = "test"
 	fakeClient := &fake.Clientset{}
-	control := NewRealConfigMapControl(fakeClient, nil, recorder)
+	control := NewRealConfigMapControl(fakeClient, recorder)
 	fakeClient.AddReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
 		update := action.(core.UpdateAction)
 		return true, update.GetObject(), nil
@@ -90,13 +88,9 @@ func TestConfigMapControlUpdateConfigMapConflictSuccess(t *testing.T) {
 	cm := newConfigMap()
 	cm.Data["file"] = "test"
 	fakeClient := &fake.Clientset{}
-	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
 	oldcm := newConfigMap()
 	oldcm.Data["file"] = "test2"
-	err := indexer.Add(oldcm)
-	g.Expect(err).To(Succeed())
-	cmLister := corelisters.NewConfigMapLister(indexer)
-	control := NewRealConfigMapControl(fakeClient, cmLister, recorder)
+	control := NewRealConfigMapControl(fakeClient, recorder)
 	conflict := false
 	fakeClient.AddReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
 		update := action.(core.UpdateAction)
@@ -117,7 +111,7 @@ func TestConfigMapControlDeleteConfigMap(t *testing.T) {
 	tc := newTidbCluster()
 	cm := newConfigMap()
 	fakeClient := &fake.Clientset{}
-	control := NewRealConfigMapControl(fakeClient, nil, recorder)
+	control := NewRealConfigMapControl(fakeClient, recorder)
 	fakeClient.AddReactor("delete", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
 		return true, nil, nil
 	})
@@ -134,7 +128,7 @@ func TestConfigMapControlDeleteConfigMapFailed(t *testing.T) {
 	tc := newTidbCluster()
 	cm := newConfigMap()
 	fakeClient := &fake.Clientset{}
-	control := NewRealConfigMapControl(fakeClient, nil, recorder)
+	control := NewRealConfigMapControl(fakeClient, recorder)
 	fakeClient.AddReactor("delete", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
 		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
 	})
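Dropping the lister also simplifies the conflict test above: there is no cache.Indexer to seed, because UpdateConfigMap now re-reads the ConfigMap through the same clientset the tests already stub with reactors. A compact sketch of that reactor technique, under the same client-go vintage as the patch (object names are illustrative):

package main

import (
	"errors"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	fakeClient := &fake.Clientset{}
	conflict := false
	// The first update returns a Conflict and the retry succeeds -- the
	// flow TestConfigMapControlUpdateConfigMapConflictSuccess drives
	// through the retry loop in realConfigMapControl.UpdateConfigMap.
	fakeClient.AddReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
		update := action.(core.UpdateAction)
		if !conflict {
			conflict = true
			return true, update.GetObject(), apierrors.NewConflict(
				action.GetResource().GroupResource(), "cm", errors.New("simulated conflict"))
		}
		return true, update.GetObject(), nil
	})

	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "demo", Name: "cm"}}
	_, err := fakeClient.CoreV1().ConfigMaps("demo").Update(cm)
	fmt.Println("first update:", err) // Conflict
	_, err = fakeClient.CoreV1().ConfigMaps("demo").Update(cm)
	fmt.Println("second update:", err) // nil
}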
diff --git a/pkg/controller/restore/restore_controller.go b/pkg/controller/restore/restore_controller.go
index dd763eda90..aaeeb3dce1 100644
--- a/pkg/controller/restore/restore_controller.go
+++ b/pkg/controller/restore/restore_controller.go
@@ -72,7 +72,6 @@ func NewController(
 	backupInformer := informerFactory.Pingcap().V1alpha1().Backups()
 	jobInformer := kubeInformerFactory.Batch().V1().Jobs()
 	pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims()
-	secretInformer := kubeInformerFactory.Core().V1().Secrets()
 	statusUpdater := controller.NewRealRestoreConditionUpdater(cli, restoreInformer.Lister(), recorder)
 	jobControl := controller.NewRealJobControl(kubeCli, recorder)
 	pvcControl := controller.NewRealGeneralPVCControl(kubeCli, recorder)
@@ -84,7 +83,7 @@ func NewController(
 			restore.NewRestoreManager(
 				backupInformer.Lister(),
 				statusUpdater,
-				secretInformer.Lister(),
+				kubeCli,
 				jobInformer.Lister(),
 				jobControl,
 				pvcInformer.Lister(),
diff --git a/pkg/controller/secret_control.go b/pkg/controller/secret_control.go
index cb746fd4e7..4b443574bb 100644
--- a/pkg/controller/secret_control.go
+++ b/pkg/controller/secret_control.go
@@ -26,7 +26,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
-	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/klog"
 )
 
@@ -38,18 +37,15 @@ type SecretControlInterface interface {
 }
 
 type realSecretControl struct {
-	kubeCli      kubernetes.Interface
-	secretLister corelisters.SecretLister
+	kubeCli kubernetes.Interface
 }
 
 // NewRealSecretControl creates a new SecretControlInterface
 func NewRealSecretControl(
 	kubeCli kubernetes.Interface,
-	secretLister corelisters.SecretLister,
 ) SecretControlInterface {
 	return &realSecretControl{
-		kubeCli:      kubeCli,
-		secretLister: secretLister,
+		kubeCli: kubeCli,
 	}
 }
 
@@ -81,7 +77,7 @@ func (rsc *realSecretControl) Create(or metav1.OwnerReference, certOpts *TiDBClu
 
 // Load loads cert and key from Secret matching the name
 func (rsc *realSecretControl) Load(ns string, secretName string) ([]byte, []byte, error) {
-	secret, err := rsc.secretLister.Secrets(ns).Get(secretName)
+	secret, err := rsc.kubeCli.CoreV1().Secrets(ns).Get(secretName, metav1.GetOptions{})
 	if err != nil {
 		return nil, nil, err
 	}
@@ -145,10 +141,8 @@ type FakeSecretControl struct {
 
 func NewFakeSecretControl(
 	kubeCli kubernetes.Interface,
-	secretLister corelisters.SecretLister,
 ) SecretControlInterface {
 	return &realSecretControl{
-		kubeCli:      kubeCli,
-		secretLister: secretLister,
+		kubeCli: kubeCli,
 	}
 }
diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller.go b/pkg/controller/tidbcluster/tidb_cluster_controller.go
index 0c0963c779..dba0a9e7bb 100644
--- a/pkg/controller/tidbcluster/tidb_cluster_controller.go
+++ b/pkg/controller/tidbcluster/tidb_cluster_controller.go
@@ -91,19 +91,17 @@ func NewController(
 	podInformer := kubeInformerFactory.Core().V1().Pods()
 	nodeInformer := kubeInformerFactory.Core().V1().Nodes()
 	csrInformer := kubeInformerFactory.Certificates().V1beta1().CertificateSigningRequests()
-	secretInformer := kubeInformerFactory.Core().V1().Secrets()
-	cmInformer := kubeInformerFactory.Core().V1().ConfigMaps()
 
 	tcControl := controller.NewRealTidbClusterControl(cli, tcInformer.Lister(), recorder)
 	pdControl := pdapi.NewDefaultPDControl(kubeCli)
 	tidbControl := controller.NewDefaultTiDBControl(kubeCli)
-	cmControl := controller.NewRealConfigMapControl(kubeCli, cmInformer.Lister(), recorder)
+	cmControl := controller.NewRealConfigMapControl(kubeCli, recorder)
 	setControl := controller.NewRealStatefuSetControl(kubeCli, setInformer.Lister(), recorder)
 	svcControl := controller.NewRealServiceControl(kubeCli, svcInformer.Lister(), recorder)
 	pvControl := controller.NewRealPVControl(kubeCli, pvcInformer.Lister(), pvInformer.Lister(), recorder)
 	pvcControl := controller.NewRealPVCControl(kubeCli, recorder, pvcInformer.Lister())
 	podControl := controller.NewRealPodControl(kubeCli, pdControl, podInformer.Lister(), recorder)
-	secControl := controller.NewRealSecretControl(kubeCli, secretInformer.Lister())
+	secControl := controller.NewRealSecretControl(kubeCli)
 	certControl := controller.NewRealCertControl(kubeCli, csrInformer.Lister(), secControl)
 	typedControl := controller.NewTypedControl(controller.NewRealGenericControl(genericCli, recorder))
 	pdScaler := mm.NewPDScaler(pdControl, pvcInformer.Lister(), pvcControl)
@@ -162,7 +160,6 @@ func NewController(
 			setInformer.Lister(),
 			svcInformer.Lister(),
 			podInformer.Lister(),
-			cmInformer.Lister(),
 			tidbUpgrader,
 			autoFailover,
 			tidbFailover,
@@ -202,7 +199,6 @@ func NewController(
 				cmControl,
 				setInformer.Lister(),
 				svcInformer.Lister(),
-				cmInformer.Lister(),
 				podInformer.Lister(),
 			),
 			mm.NewTidbDiscoveryManager(typedControl),
diff --git a/pkg/manager/member/pd_member_manager_test.go b/pkg/manager/member/pd_member_manager_test.go
index 308100e239..b2d5dc46f7 100644
--- a/pkg/manager/member/pd_member_manager_test.go
+++ b/pkg/manager/member/pd_member_manager_test.go
@@ -747,12 +747,11 @@ func newFakePDMemberManager() (*pdMemberManager, *controller.FakeStatefulSetCont
 	pvcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().PersistentVolumeClaims()
 	tcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().TidbClusters()
 	csrInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Certificates().V1beta1().CertificateSigningRequests()
-	secretInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Secrets()
 	setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer)
 	svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer)
 	podControl := controller.NewFakePodControl(podInformer)
 	pdControl := pdapi.NewFakePDControl(kubeCli)
-	secControl := controller.NewFakeSecretControl(kubeCli, secretInformer.Lister())
+	secControl := controller.NewFakeSecretControl(kubeCli)
 	certControl := controller.NewFakeCertControl(kubeCli, csrInformer.Lister(), secControl)
 	pdScaler := NewFakePDScaler()
 	autoFailover := true
diff --git a/pkg/manager/member/pump_member_manager.go b/pkg/manager/member/pump_member_manager.go
index 3bd15e3088..ef4eaaa835 100644
--- a/pkg/manager/member/pump_member_manager.go
+++ b/pkg/manager/member/pump_member_manager.go
@@ -48,7 +48,6 @@ type pumpMemberManager struct {
 	cmControl controller.ConfigMapControlInterface
 	setLister v1.StatefulSetLister
 	svcLister corelisters.ServiceLister
-	cmLister  corelisters.ConfigMapLister
 	podLister corelisters.PodLister
 }
 
@@ -61,7 +60,6 @@ func NewPumpMemberManager(
 	cmControl controller.ConfigMapControlInterface,
 	setLister v1.StatefulSetLister,
 	svcLister corelisters.ServiceLister,
-	cmLister corelisters.ConfigMapLister,
 	podLister corelisters.PodLister) manager.Manager {
 	return &pumpMemberManager{
 		certControl,
@@ -71,7 +69,6 @@ func NewPumpMemberManager(
 		cmControl,
 		setLister,
 		svcLister,
-		cmLister,
 		podLister,
 	}
 }
diff --git a/pkg/manager/member/pump_member_manager_test.go b/pkg/manager/member/pump_member_manager_test.go
index c43e4475d3..69b73ee6e4 100644
--- a/pkg/manager/member/pump_member_manager_test.go
+++ b/pkg/manager/member/pump_member_manager_test.go
@@ -441,13 +441,12 @@ func newFakePumpMemberManager() (*pumpMemberManager, *pumpFakeControls, *pumpFak
 	setInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Apps().V1().StatefulSets()
 	tcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().TidbClusters()
 	svcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Services()
-	secretInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Secrets()
 	csrInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Certificates().V1beta1().CertificateSigningRequests()
 	epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints()
 	cmInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().ConfigMaps()
 	podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods()
 	setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer)
-	secControl := controller.NewFakeSecretControl(kubeCli, secretInformer.Lister())
+	secControl := controller.NewFakeSecretControl(kubeCli)
 	certControl := controller.NewFakeCertControl(kubeCli, csrInformer.Lister(), secControl)
 	svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer)
 	cmControl := controller.NewFakeConfigMapControl(cmInformer)
@@ -460,7 +459,6 @@ func newFakePumpMemberManager() (*pumpMemberManager, *pumpFakeControls, *pumpFak
 		cmControl,
 		setInformer.Lister(),
 		svcInformer.Lister(),
-		cmInformer.Lister(),
 		podInformer.Lister(),
 	}
 	controls := &pumpFakeControls{
diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go
index e47e0d058b..fc74272f13 100644
--- a/pkg/manager/member/tidb_member_manager.go
+++ b/pkg/manager/member/tidb_member_manager.go
@@ -79,7 +79,6 @@ func NewTiDBMemberManager(setControl controller.StatefulSetControlInterface,
 	setLister v1.StatefulSetLister,
 	svcLister corelisters.ServiceLister,
 	podLister corelisters.PodLister,
-	cmLister corelisters.ConfigMapLister,
 	tidbUpgrader Upgrader,
 	autoFailover bool,
 	tidbFailover Failover) manager.Manager {
@@ -92,7 +91,6 @@ func NewTiDBMemberManager(setControl controller.StatefulSetControlInterface,
 		setLister:    setLister,
 		svcLister:    svcLister,
 		podLister:    podLister,
-		cmLister:     cmLister,
 		tidbUpgrader: tidbUpgrader,
 		autoFailover: autoFailover,
 		tidbFailover: tidbFailover,
diff --git a/pkg/manager/member/tidb_member_manager_test.go b/pkg/manager/member/tidb_member_manager_test.go
index ad18cefb95..ba01f3db38 100644
--- a/pkg/manager/member/tidb_member_manager_test.go
+++ b/pkg/manager/member/tidb_member_manager_test.go
@@ -772,7 +772,7 @@ func newFakeTiDBMemberManager() (*tidbMemberManager, *controller.FakeStatefulSet
 	cmInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().ConfigMaps()
 	setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer)
 	svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer)
-	secControl := controller.NewFakeSecretControl(kubeCli, secretInformer.Lister())
+	secControl := controller.NewFakeSecretControl(kubeCli)
 	certControl := controller.NewFakeCertControl(kubeCli, csrInformer.Lister(), secControl)
 	genericControl := controller.NewFakeGenericControl()
 	tidbUpgrader := NewFakeTiDBUpgrader()
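For contrast, this is the plumbing the patch strips from all of these call sites: every Lister() taken from a shared informer factory starts a watch and holds an in-memory copy of every object of that type, which is wasted overhead when the object is only read occasionally while building a Job or a certificate. A sketch of the removed pattern, with the same fake-clientset and resync assumptions as the earlier examples:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	kubeCli := fake.NewSimpleClientset()
	factory := kubeinformers.NewSharedInformerFactory(kubeCli, 30*time.Second)

	// Asking for a lister registers an informer that watches and caches
	// *all* Secrets -- the cost #1504 removes from point-read call sites.
	secretLister := factory.Core().V1().Secrets().Lister()

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	secrets, err := secretLister.Secrets("demo").List(labels.Everything())
	fmt.Println(len(secrets), err)
}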