Skip to content

Commit

Permalink
Add default behavior in config field description
Browse files Browse the repository at this point in the history
Signed-off-by: Aylei <rayingecho@gmail.com>
  • Loading branch information
aylei committed Dec 25, 2019
1 parent 51b6820 commit e0e37b6
Show file tree
Hide file tree
Showing 6 changed files with 1,473 additions and 794 deletions.
440 changes: 295 additions & 145 deletions manifests/crd.yaml

Large diffs are not rendered by default.

1,030 changes: 546 additions & 484 deletions pkg/apis/pingcap/v1alpha1/openapi_generated.go

Large diffs are not rendered by default.

72 changes: 56 additions & 16 deletions pkg/apis/pingcap/v1alpha1/pd_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,26 +33,14 @@ import (
// PDConfig is the configuration of pd-server
// +k8s:openapi-gen=true
type PDConfig struct {
// +optional
Version *bool `json:"-"`
// +optional
ConfigCheck *bool `json:"-"`

// +optional
ClientUrls string `toml:"client-urls,omitempty" json:"client-urls,omitempty"`
// +optional
PeerUrls string `toml:"peer-urls,omitempty" json:"peer-urls,omitempty"`
// +optional
AdvertiseClientUrls string `toml:"advertise-client-urls,omitempty" json:"advertise-client-urls,omitempty"`
// +optional
AdvertisePeerUrls string `toml:"advertise-peer-urls,omitempty" json:"advertise-peer-urls,omitempty"`

// +optional
Name string `toml:"name,omitempty" json:"name,omitempty"`
// +optional
DataDir string `toml:"data-dir,omitempty" json:"data-dir,omitempty"`
// +optional
ForceNewCluster *bool `json:"force-new-cluster,omitempty"`
// Optional: Defaults to true
// +optional
EnableGRPCGateway *bool `json:"enable-grpc-gateway,omitempty"`

Expand All @@ -69,6 +57,7 @@ type PDConfig struct {
// in etcd after lease time, etcd will expire the leader key
// and other servers can campaign the leader again.
// Etcd only supports seconds TTL, so here is second too.
// Optional: Defaults to 3
// +optional
LeaderLease *int64 `toml:"lease,omitempty" json:"lease,omitempty"`

Expand All @@ -83,15 +72,18 @@ type PDConfig struct {
LogLevelDeprecated string `toml:"log-level,omitempty" json:"log-level,omitempty"`

// TsoSaveInterval is the interval to save timestamp.
// Optional: Defaults to 3s
// +optional
TsoSaveInterval string `toml:"tso-save-interval,omitempty" json:"tso-save-interval,omitempty"`

// +optional
Metric *PDMetricConfig `toml:"metric,omitempty" json:"metric,omitempty"`

// Immutable, change should be made through pd-ctl after cluster creation
// +optional
Schedule *PDScheduleConfig `toml:"schedule,omitempty" json:"schedule,omitempty"`

// Immutable, change should be made through pd-ctl after cluster creation
// +optional
Replication *PDReplicationConfig `toml:"replication,omitempty" json:"replication,omitempty"`

Expand Down Expand Up @@ -130,6 +122,7 @@ type PDConfig struct {
// If enabled, Raft runs an additional election phase
// to check whether it would get enough votes to win
// an election, thus minimizing disruptions.
// Optional: Defaults to true
// +optional
PreVote *bool `toml:"enable-prevote,omitempty" json:"enable-prevote,omitempty"`

Expand All @@ -141,6 +134,7 @@ type PDConfig struct {

// NamespaceClassifier is for classifying stores/regions into different
// namespaces.
// Optional: Defaults to table
// +optional
NamespaceClassifier string `toml:"namespace-classifier,omitempty" json:"namespace-classifier,omitempty"`
}
Expand All @@ -149,6 +143,7 @@ type PDConfig struct {
// +k8s:openapi-gen=true
type PDLogConfig struct {
// Log level.
// Optional: Defaults to info
// +optional
Level string `toml:"level,omitempty" json:"level,omitempty"`
// Log format. one of json, text, or console.
Expand Down Expand Up @@ -183,16 +178,20 @@ type PDLogConfig struct {
// PDReplicationConfig is the replication-related configuration of pd-server.
// +k8s:openapi-gen=true
type PDReplicationConfig struct {
// MaxReplicas is the number of replicas for each region.
// Immutable, change should be made through pd-ctl after cluster creation
// Optional: Defaults to 3
// +optional
MaxReplicas *uint64 `toml:"max-replicas,omitempty" json:"max-replicas,omitempty"`

// The label keys specify the location of a store.
// The placement priorities are implied by the order of label keys.
// For example, ["zone", "rack"] means that we should place replicas to
// different zones first, then to different racks if we don't have enough zones.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
LocationLabels StringSlice `toml:"location-labels,omitempty" json:"location-labels,omitempty"`
// StrictlyMatchLabel strictly checks if the label of TiKV is matched with LocationLabels.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
StrictlyMatchLabel *bool `toml:"strictly-match-label,omitempty" json:"strictly-match-label,string,omitempty"`
}
Expand Down Expand Up @@ -225,48 +224,73 @@ type PDNamespaceConfig struct {
type PDScheduleConfig struct {
// If the snapshot count of one store is greater than this value,
// it will never be used as a source or target store.
// Immutable, change should be made through pd-ctl after cluster creation
// Optional: Defaults to 3
// +optional
MaxSnapshotCount *uint64 `toml:"max-snapshot-count,omitempty" json:"max-snapshot-count,omitempty"`
// Immutable, change should be made through pd-ctl after cluster creation
// Optional: Defaults to 16
// +optional
MaxPendingPeerCount *uint64 `toml:"max-pending-peer-count,omitempty" json:"max-pending-peer-count,omitempty"`
// If both the size of region is smaller than MaxMergeRegionSize
// and the number of rows in region is smaller than MaxMergeRegionKeys,
// it will try to merge with adjacent regions.
// Immutable, change should be made through pd-ctl after cluster creation
// Optional: Defaults to 20
// +optional
MaxMergeRegionSize *uint64 `toml:"max-merge-region-size,omitempty" json:"max-merge-region-size,omitempty"`
// Immutable, change should be made through pd-ctl after cluster creation
// Optional: Defaults to 200000
// +optional
MaxMergeRegionKeys *uint64 `toml:"max-merge-region-keys,omitempty" json:"max-merge-region-keys,omitempty"`
// SplitMergeInterval is the minimum interval time to permit merge after split.
// Immutable, change should be made through pd-ctl after cluster creation
// Optional: Defaults to 1h
// +optional
SplitMergeInterval string `toml:"split-merge-interval,omitempty" json:"split-merge-interval,omitempty"`
// PatrolRegionInterval is the interval for scanning region during patrol.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
PatrolRegionInterval string `toml:"patrol-region-interval,omitempty" json:"patrol-region-interval,omitempty"`
// MaxStoreDownTime is the max duration after which
// a store will be considered to be down if it hasn't reported heartbeats.
// Immutable, change should be made through pd-ctl after cluster creation
// Optional: Defaults to 30m
// +optional
MaxStoreDownTime string `toml:"max-store-down-time,omitempty" json:"max-store-down-time,omitempty"`
// LeaderScheduleLimit is the max coexist leader schedules.
// Immutable, change should be made through pd-ctl after cluster creation
// Optional: Defaults to 4
// +optional
LeaderScheduleLimit *uint64 `toml:"leader-schedule-limit,omitempty" json:"leader-schedule-limit,omitempty"`
// RegionScheduleLimit is the max coexist region schedules.
// Immutable, change should be made through pd-ctl after cluster creation
// Optional: Defaults to 2048
// +optional
RegionScheduleLimit *uint64 `toml:"region-schedule-limit,omitempty" json:"region-schedule-limit,omitempty"`
// ReplicaScheduleLimit is the max coexist replica schedules.
// Immutable, change should be made through pd-ctl after cluster creation
// Optional: Defaults to 64
// +optional
ReplicaScheduleLimit *uint64 `toml:"replica-schedule-limit,omitempty" json:"replica-schedule-limit,omitempty"`
// MergeScheduleLimit is the max coexist merge schedules.
// Immutable, change should be made through pd-ctl after cluster creation
// Optional: Defaults to 8
// +optional
MergeScheduleLimit *uint64 `toml:"merge-schedule-limit,omitempty" json:"merge-schedule-limit,omitempty"`
// HotRegionScheduleLimit is the max coexist hot region schedules.
// Immutable, change should be made through pd-ctl after cluster creation
// Optional: Defaults to 4
// +optional
HotRegionScheduleLimit *uint64 `toml:"hot-region-schedule-limit,omitempty" json:"hot-region-schedule-limit,omitempty"`
// HotRegionCacheHitThreshold is the cache hits threshold of the hot region.
// If the number of times a region hits the hot cache is greater than this
// threshold, it is considered a hot region.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
HotRegionCacheHitsThreshold *uint64 `toml:"hot-region-cache-hits-threshold,omitempty" json:"hot-region-cache-hits-threshold,omitempty"`
// TolerantSizeRatio is the ratio of buffer size for balance scheduler.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
TolerantSizeRatio *float64 `toml:"tolerant-size-ratio,omitempty" json:"tolerant-size-ratio,omitempty"`
//
Expand All @@ -277,42 +301,52 @@ type PDScheduleConfig struct {
//
// LowSpaceRatio is the lowest usage ratio of a store which is regarded as low space.
// When in low space, store region score increases to very large and varies inversely with available size.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
LowSpaceRatio *float64 `toml:"low-space-ratio,omitempty" json:"low-space-ratio,omitempty"`
// HighSpaceRatio is the highest usage ratio of a store which is regarded as high space.
// High space means there is a lot of spare capacity, and store region score varies directly with used size.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
HighSpaceRatio *float64 `toml:"high-space-ratio,omitempty" json:"high-space-ratio,omitempty"`
// DisableLearner is the option to disable using AddLearnerNode instead of AddNode
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
DisableLearner *bool `toml:"disable-raft-learner,omitempty" json:"disable-raft-learner,string,omitempty"`

// DisableRemoveDownReplica is the option to prevent replica checker from
// removing down replicas.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
DisableRemoveDownReplica *bool `toml:"disable-remove-down-replica,omitempty" json:"disable-remove-down-replica,string,omitempty"`
// DisableReplaceOfflineReplica is the option to prevent replica checker from
// replacing offline replicas.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
DisableReplaceOfflineReplica *bool `toml:"disable-replace-offline-replica,omitempty" json:"disable-replace-offline-replica,string,omitempty"`
// DisableMakeUpReplica is the option to prevent replica checker from making up
// replicas when replica count is less than expected.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
DisableMakeUpReplica *bool `toml:"disable-make-up-replica,omitempty" json:"disable-make-up-replica,string,omitempty"`
// DisableRemoveExtraReplica is the option to prevent replica checker from
// removing extra replicas.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
DisableRemoveExtraReplica *bool `toml:"disable-remove-extra-replica,omitempty" json:"disable-remove-extra-replica,string,omitempty"`
// DisableLocationReplacement is the option to prevent replica checker from
// moving replica to a better location.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
DisableLocationReplacement *bool `toml:"disable-location-replacement,omitempty" json:"disable-location-replacement,string,omitempty"`
// DisableNamespaceRelocation is the option to prevent namespace checker
// from moving replica to the target namespace.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
DisableNamespaceRelocation *bool `toml:"disable-namespace-relocation,omitempty" json:"disable-namespace-relocation,string,omitempty"`

// Schedulers support for loading customized schedulers
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
Schedulers *PDSchedulerConfigs `toml:"schedulers,omitempty" json:"schedulers-v2,omitempty"` // json v2 is for the sake of compatible upgrade
}
Expand All @@ -322,10 +356,13 @@ type PDSchedulerConfigs []PDSchedulerConfig
// PDSchedulerConfig is a customized scheduler configuration entry.
// +k8s:openapi-gen=true
type PDSchedulerConfig struct {
// Type is the type (name) of the scheduler — presumably one of the
// scheduler kinds PD recognizes; verify against pd-server docs.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
Type string `toml:"type,omitempty" json:"type,omitempty"`
// Args are the arguments passed to the scheduler on creation.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
Args []string `toml:"args,omitempty" json:"args,omitempty"`
// Disable indicates whether this scheduler is disabled.
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
Disable *bool `toml:"disable,omitempty" json:"disable,omitempty"`
}
Expand Down Expand Up @@ -367,9 +404,12 @@ type PDServerConfig struct {

// +k8s:openapi-gen=true
type PDMetricConfig struct {
PushJob string `toml:"job,omitempty" json:"job,omitempty"`
PushAddress string `toml:"address,omitempty" json:"address,omitempty"`
PushInterval string `toml:"interval,omitempty" json:"interval,omitempty"`
// +optional
PushJob *string `toml:"job,omitempty" json:"job,omitempty"`
// +optional
PushAddress *string `toml:"address,omitempty" json:"address,omitempty"`
// +optional
PushInterval *string `toml:"interval,omitempty" json:"interval,omitempty"`
}

// +k8s:openapi-gen=true
Expand Down
Loading

0 comments on commit e0e37b6

Please sign in to comment.