diff --git a/cmd/cinder-csi-plugin/main.go b/cmd/cinder-csi-plugin/main.go
index d31d376d37..88bb86c987 100644
--- a/cmd/cinder-csi-plugin/main.go
+++ b/cmd/cinder-csi-plugin/main.go
@@ -43,6 +43,7 @@ var (
provideControllerService bool
provideNodeService bool
noClient bool
+ withTopology bool
)
func main() {
@@ -87,6 +88,8 @@ func main() {
cmd.Flags().StringSliceVar(&cloudConfig, "cloud-config", nil, "CSI driver cloud config. This option can be given multiple times")
+ cmd.PersistentFlags().BoolVar(&withTopology, "with-topology", true, "cluster is topology-aware")
+
cmd.PersistentFlags().StringSliceVar(&cloudNames, "cloud-name", []string{""}, "Cloud name to instruct CSI driver to read additional OpenStack cloud credentials from the configuration subsections. This option can be specified multiple times to manage multiple OpenStack clouds.")
cmd.PersistentFlags().StringToStringVar(&additionalTopologies, "additional-topology", map[string]string{}, "Additional CSI driver topology keys, for example topology.kubernetes.io/region=REGION1. This option can be specified multiple times to add multiple additional topology keys.")
@@ -107,9 +110,10 @@ func main() {
func handle() {
// Initialize cloud
d := cinder.NewDriver(&cinder.DriverOpts{
- Endpoint: endpoint,
- ClusterID: cluster,
- PVCLister: csi.GetPVCLister(),
+ Endpoint: endpoint,
+ ClusterID: cluster,
+ PVCLister: csi.GetPVCLister(),
+ WithTopology: withTopology,
})
openstack.InitOpenStackProvider(cloudConfig, httpEndpoint)
diff --git a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
index 52b450356d..389008067c 100644
--- a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
+++ b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
@@ -70,6 +70,13 @@ In addition to the standard set of klog flags, `cinder-csi-plugin` accepts the f
The manifests default this to `unix://csi/csi.sock`, which is supplied via the `CSI_ENDPOINT` environment variable.
+
+ --with-topology <enabled>
+
+ If set to true then the CSI driver reports topology information and the controller respects it.
+
+ Defaults to `true` (enabled).
+
+
--cloud-config <config file> [--cloud-config <config file> ...]
This argument must be given at least once.
@@ -101,16 +108,17 @@ In addition to the standard set of klog flags, `cinder-csi-plugin` accepts the f
--provide-controller-service <enabled>
- If set to true then the CSI driver does provide the controller service.
+ If set to true then the CSI driver provides the controller service.
- The default is to provide the controller service.
+ Defaults to `true` (enabled).
--provide-node-service <enabled>
- If set to true then the CSI driver does provide the node service.
+ If set to true then the CSI driver provides the node service.
- The default is to provide the node service.
+ Defaults to `true` (enabled).
+
--pvc-annotations <disabled>
@@ -118,7 +126,7 @@ In addition to the standard set of klog flags, `cinder-csi-plugin` accepts the f
scheduler hints. See [Supported PVC Annotations](#supported-pvc-annotations)
for more information.
- The default is not to provide the PVC annotations support.
+ Defaults to `false` (disabled).
diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go
index 43f54305ec..32183f9685 100644
--- a/pkg/csi/cinder/controllerserver.go
+++ b/pkg/csi/cinder/controllerserver.go
@@ -84,13 +84,17 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
// Volume Type
volType := volParams["type"]
- // First check if volAvailability is already specified, if not get preferred from Topology
- // Required, incase vol AZ is different from node AZ
- volAvailability := volParams["availability"]
- if volAvailability == "" {
- // Check from Topology
- if req.GetAccessibilityRequirements() != nil {
- volAvailability = sharedcsi.GetAZFromTopology(topologyKey, req.GetAccessibilityRequirements())
+ var volAvailability string
+ if cs.Driver.withTopology {
+ // First check if volAvailability is already specified, if not get preferred from Topology
+ // Required, in case vol AZ is different from node AZ
+ volAvailability = volParams["availability"]
+ if volAvailability == "" {
+ accessibleTopologyReq := req.GetAccessibilityRequirements()
+ // Check from Topology
+ if accessibleTopologyReq != nil {
+ volAvailability = sharedcsi.GetAZFromTopology(topologyKey, accessibleTopologyReq)
+ }
}
}
diff --git a/pkg/csi/cinder/controllerserver_test.go b/pkg/csi/cinder/controllerserver_test.go
index d04131ccae..8ba39b7a3f 100644
--- a/pkg/csi/cinder/controllerserver_test.go
+++ b/pkg/csi/cinder/controllerserver_test.go
@@ -39,7 +39,7 @@ func init() {
osmock = new(openstack.OpenStackMock)
osmockRegionX = new(openstack.OpenStackMock)
- d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster})
+ d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster, WithTopology: true})
fakeCs = NewControllerServer(d, map[string]openstack.IOpenStack{
"": osmock,
diff --git a/pkg/csi/cinder/driver.go b/pkg/csi/cinder/driver.go
index 85ceea0a1f..04356a0eba 100644
--- a/pkg/csi/cinder/driver.go
+++ b/pkg/csi/cinder/driver.go
@@ -63,10 +63,11 @@ type CinderDriver = Driver
//revive:enable:exported
type Driver struct {
- name string
- fqVersion string //Fully qualified version in format {Version}@{CPO version}
- endpoint string
- clusterID string
+ name string
+ fqVersion string //Fully qualified version in format {Version}@{CPO version}
+ endpoint string
+ clusterID string
+ withTopology bool
ids *identityServer
cs *controllerServer
@@ -80,23 +81,27 @@ type Driver struct {
}
type DriverOpts struct {
- ClusterID string
- Endpoint string
+ ClusterID string
+ Endpoint string
+ WithTopology bool
+
PVCLister v1.PersistentVolumeClaimLister
}
func NewDriver(o *DriverOpts) *Driver {
d := &Driver{
- name: driverName,
- fqVersion: fmt.Sprintf("%s@%s", Version, version.Version),
- endpoint: o.Endpoint,
- clusterID: o.ClusterID,
- pvcLister: o.PVCLister,
+ name: driverName,
+ fqVersion: fmt.Sprintf("%s@%s", Version, version.Version),
+ endpoint: o.Endpoint,
+ clusterID: o.ClusterID,
+ withTopology: o.WithTopology,
+ pvcLister: o.PVCLister,
}
klog.Info("Driver: ", d.name)
klog.Info("Driver version: ", d.fqVersion)
klog.Info("CSI Spec version: ", specVersion)
+ klog.Infof("Topology awareness: %t", d.withTopology)
d.AddControllerServiceCapabilities(
[]csi.ControllerServiceCapability_RPC_Type{
diff --git a/pkg/csi/cinder/nodeserver.go b/pkg/csi/cinder/nodeserver.go
index 49d56e521b..0b549c120e 100644
--- a/pkg/csi/cinder/nodeserver.go
+++ b/pkg/csi/cinder/nodeserver.go
@@ -286,12 +286,20 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
}
func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
-
nodeID, err := ns.Metadata.GetInstanceID()
if err != nil {
return nil, status.Errorf(codes.Internal, "[NodeGetInfo] unable to retrieve instance id of node %v", err)
}
+ nodeInfo := &csi.NodeGetInfoResponse{
+ NodeId: nodeID,
+ MaxVolumesPerNode: ns.Opts.NodeVolumeAttachLimit,
+ }
+
+ if !ns.Driver.withTopology {
+ return nodeInfo, nil
+ }
+
zone, err := ns.Metadata.GetAvailabilityZone()
if err != nil {
return nil, status.Errorf(codes.Internal, "[NodeGetInfo] Unable to retrieve availability zone of node %v", err)
@@ -301,13 +309,9 @@ func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoReque
for k, v := range ns.Topologies {
topologyMap[k] = v
}
- topology := &csi.Topology{Segments: topologyMap}
+ nodeInfo.AccessibleTopology = &csi.Topology{Segments: topologyMap}
- return &csi.NodeGetInfoResponse{
- NodeId: nodeID,
- AccessibleTopology: topology,
- MaxVolumesPerNode: ns.Opts.NodeVolumeAttachLimit,
- }, nil
+ return nodeInfo, nil
}
func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
diff --git a/pkg/csi/cinder/nodeserver_test.go b/pkg/csi/cinder/nodeserver_test.go
index b3fae1e9c7..4a461817c2 100644
--- a/pkg/csi/cinder/nodeserver_test.go
+++ b/pkg/csi/cinder/nodeserver_test.go
@@ -41,7 +41,7 @@ var omock *openstack.OpenStackMock
func init() {
if fakeNs == nil {
- d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster})
+ d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster, WithTopology: true})
// mock MountMock
mmock = new(mount.MountMock)
diff --git a/tests/sanity/cinder/sanity_test.go b/tests/sanity/cinder/sanity_test.go
index c404a4c102..62cdc91b15 100644
--- a/tests/sanity/cinder/sanity_test.go
+++ b/tests/sanity/cinder/sanity_test.go
@@ -19,7 +19,7 @@ func TestDriver(t *testing.T) {
endpoint := "unix://" + socket
cluster := "kubernetes"
- d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster})
+ d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster, WithTopology: true})
fakecloudprovider := getfakecloud()
openstack.OsInstances = map[string]openstack.IOpenStack{