cinder-csi-plugin: Add --with-topology option (#2743)
Many clouds do not have a 1:1 mapping of compute and storage AZs. Because
we generate topology information from the metadata service on the node
(i.e. a compute AZ), this mismatch can prevent us from scheduling VMs.

Add a new boolean, '--with-topology', to allow users to disable the
topology feature where it does not make sense. An identical option
already exists for the Manila CSI driver. However, unlike that option,
this one defaults to 'true' to retain current behavior.

Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
stephenfin authored Dec 11, 2024
1 parent 324a883 commit ac23a1e
Showing 8 changed files with 61 additions and 36 deletions.
10 changes: 7 additions & 3 deletions cmd/cinder-csi-plugin/main.go
```diff
@@ -43,6 +43,7 @@ var (
 	provideControllerService bool
 	provideNodeService       bool
 	noClient                 bool
+	withTopology             bool
 )
 
 func main() {
@@ -87,6 +88,8 @@ func main() {
 
 	cmd.Flags().StringSliceVar(&cloudConfig, "cloud-config", nil, "CSI driver cloud config. This option can be given multiple times")
 
+	cmd.PersistentFlags().BoolVar(&withTopology, "with-topology", true, "cluster is topology-aware")
+
 	cmd.PersistentFlags().StringSliceVar(&cloudNames, "cloud-name", []string{""}, "Cloud name to instruct CSI driver to read additional OpenStack cloud credentials from the configuration subsections. This option can be specified multiple times to manage multiple OpenStack clouds.")
 	cmd.PersistentFlags().StringToStringVar(&additionalTopologies, "additional-topology", map[string]string{}, "Additional CSI driver topology keys, for example topology.kubernetes.io/region=REGION1. This option can be specified multiple times to add multiple additional topology keys.")
 
@@ -107,9 +110,10 @@ func main() {
 func handle() {
 	// Initialize cloud
 	d := cinder.NewDriver(&cinder.DriverOpts{
-		Endpoint:  endpoint,
-		ClusterID: cluster,
-		PVCLister: csi.GetPVCLister(),
+		Endpoint:     endpoint,
+		ClusterID:    cluster,
+		PVCLister:    csi.GetPVCLister(),
+		WithTopology: withTopology,
 	})
 
 	openstack.InitOpenStackProvider(cloudConfig, httpEndpoint)
```
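To see the flag plumbing in isolation, here is a minimal, runnable sketch of the same pattern. It is not code from this commit: the `driverOpts` type and the printed message are illustrative; only the flag name, default, and help text mirror the diff above.

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// driverOpts is a stand-in for cinder.DriverOpts in this sketch.
type driverOpts struct {
	withTopology bool
}

func main() {
	opts := driverOpts{}

	// Register the boolean flag with a default of true, matching the
	// commit's choice to keep topology awareness enabled unless the
	// operator opts out with --with-topology=false.
	pflag.BoolVar(&opts.withTopology, "with-topology", true, "cluster is topology-aware")
	pflag.Parse()

	fmt.Printf("Topology awareness: %t\n", opts.withTopology)
}
```

Invoking such a binary with `--with-topology=false` flips the value; this is the same path the plugin uses to hand the option to `cinder.NewDriver`.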
18 changes: 13 additions & 5 deletions docs/cinder-csi-plugin/using-cinder-csi-plugin.md
```diff
@@ -70,6 +70,13 @@
   The manifests default this to `unix://csi/csi.sock`, which is supplied via the `CSI_ENDPOINT` environment variable.
 </dd>
 
+<dt>--with-topology &lt;enabled&gt;</dt>
+<dd>
+  If set to true then the CSI driver reports topology information and the controller respects it.
+
+  Defaults to `true` (enabled).
+</dd>
+
 <dt>--cloud-config &lt;config file&gt; [--cloud-config &lt;config file&gt; ...]</dt>
 <dd>
   This argument must be given at least once.
@@ -101,24 +108,25 @@
 
 <dt>--provide-controller-service &lt;enabled&gt;</dt>
 <dd>
-  If set to true then the CSI driver does provide the controller service.
+  If set to true then the CSI driver provides the controller service.
 
-  The default is to provide the controller service.
+  Defaults to `true` (enabled).
 </dd>
 
 <dt>--provide-node-service &lt;enabled&gt;</dt>
 <dd>
-  If set to true then the CSI driver does provide the node service.
+  If set to true then the CSI driver provides the node service.
 
-  The default is to provide the node service.
+  Defaults to `true` (enabled).
 </dd>
 
 <dt>--pvc-annotations &lt;disabled&gt;</dt>
 <dd>
   If set to true then the CSI driver will use PVC annotations to provide volume
   scheduler hints. See [Supported PVC Annotations](#supported-pvc-annotations)
   for more information.
 
-  The default is not to provide the PVC annotations support.
+  Defaults to `false` (disabled).
 </dd>
 </dl>
```
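For example, on a cloud where Cinder exposes a single storage AZ while Nova reports per-host compute AZs, an operator would likely run both the controller and node plugins with `--with-topology=false` so that volume placement is not constrained by compute AZ labels.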
18 changes: 11 additions & 7 deletions pkg/csi/cinder/controllerserver.go
```diff
@@ -84,13 +84,17 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
 	// Volume Type
 	volType := volParams["type"]
 
-	// First check if volAvailability is already specified, if not get preferred from Topology
-	// Required, in case vol AZ is different from node AZ
-	volAvailability := volParams["availability"]
-	if volAvailability == "" {
-		// Check from Topology
-		if req.GetAccessibilityRequirements() != nil {
-			volAvailability = sharedcsi.GetAZFromTopology(topologyKey, req.GetAccessibilityRequirements())
+	var volAvailability string
+	if cs.Driver.withTopology {
+		// First check if volAvailability is already specified, if not get preferred from Topology
+		// Required, in case vol AZ is different from node AZ
+		volAvailability = volParams["availability"]
+		if volAvailability == "" {
+			accessibleTopologyReq := req.GetAccessibilityRequirements()
+			// Check from Topology
+			if accessibleTopologyReq != nil {
+				volAvailability = sharedcsi.GetAZFromTopology(topologyKey, accessibleTopologyReq)
+			}
 		}
 	}
```
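The body of `sharedcsi.GetAZFromTopology` is not part of this diff. A plausible sketch of such a helper, assuming it prefers the scheduler's preferred topology segments before falling back to requisite ones, looks like this; the function below is an illustration, not the project's actual implementation.

```go
package topology

import (
	csi "github.com/container-storage-interface/spec/lib/go/csi"
)

// getAZFromTopology is a hypothetical stand-in for sharedcsi.GetAZFromTopology.
// It scans the CSI accessibility requirements for a segment keyed by
// topologyKey and returns its value, or "" when no zone is present.
func getAZFromTopology(topologyKey string, req *csi.TopologyRequirement) string {
	// Preferred topologies encode where the scheduler would like the
	// volume to live; check them first.
	for _, topo := range req.GetPreferred() {
		if zone, ok := topo.GetSegments()[topologyKey]; ok {
			return zone
		}
	}
	// Fall back to requisite topologies, which encode where the volume
	// must be accessible from.
	for _, topo := range req.GetRequisite() {
		if zone, ok := topo.GetSegments()[topologyKey]; ok {
			return zone
		}
	}
	return ""
}
```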
2 changes: 1 addition & 1 deletion pkg/csi/cinder/controllerserver_test.go
```diff
@@ -39,7 +39,7 @@ func init() {
 	osmock = new(openstack.OpenStackMock)
 	osmockRegionX = new(openstack.OpenStackMock)
 
-	d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster})
+	d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster, WithTopology: true})
 
 	fakeCs = NewControllerServer(d, map[string]openstack.IOpenStack{
 		"": osmock,
```
27 changes: 16 additions & 11 deletions pkg/csi/cinder/driver.go
```diff
@@ -63,10 +63,11 @@ type CinderDriver = Driver
 //revive:enable:exported
 
 type Driver struct {
-	name      string
-	fqVersion string //Fully qualified version in format {Version}@{CPO version}
-	endpoint  string
-	clusterID string
+	name         string
+	fqVersion    string //Fully qualified version in format {Version}@{CPO version}
+	endpoint     string
+	clusterID    string
+	withTopology bool
 
 	ids *identityServer
 	cs  *controllerServer
@@ -80,23 +81,27 @@ type Driver struct {
 }
 
 type DriverOpts struct {
-	ClusterID string
-	Endpoint  string
+	ClusterID    string
+	Endpoint     string
+	WithTopology bool
 
 	PVCLister v1.PersistentVolumeClaimLister
 }
 
 func NewDriver(o *DriverOpts) *Driver {
 	d := &Driver{
-		name:      driverName,
-		fqVersion: fmt.Sprintf("%s@%s", Version, version.Version),
-		endpoint:  o.Endpoint,
-		clusterID: o.ClusterID,
-		pvcLister: o.PVCLister,
+		name:         driverName,
+		fqVersion:    fmt.Sprintf("%s@%s", Version, version.Version),
+		endpoint:     o.Endpoint,
+		clusterID:    o.ClusterID,
+		withTopology: o.WithTopology,
+		pvcLister:    o.PVCLister,
 	}
 
 	klog.Info("Driver: ", d.name)
 	klog.Info("Driver version: ", d.fqVersion)
 	klog.Info("CSI Spec version: ", specVersion)
+	klog.Infof("Topology awareness: %t", d.withTopology)
 
 	d.AddControllerServiceCapabilities(
 		[]csi.ControllerServiceCapability_RPC_Type{
```
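As a usage sketch, a caller embedding the driver might construct it as follows. The endpoint and cluster values are placeholders, and only `WithTopology` relates to this commit.

```go
package main

import (
	"k8s.io/cloud-provider-openstack/pkg/csi/cinder"
)

func main() {
	// Disable topology reporting, e.g. on a cloud whose storage AZs do
	// not map 1:1 onto compute AZs.
	d := cinder.NewDriver(&cinder.DriverOpts{
		Endpoint:     "unix://csi/csi.sock", // placeholder endpoint
		ClusterID:    "kubernetes",          // placeholder cluster ID
		WithTopology: false,
	})
	_ = d // a real caller would go on to run the driver's gRPC services
}
```

With the `%t` verb in the log line above, this construction logs `Topology awareness: false` at startup.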
18 changes: 11 additions & 7 deletions pkg/csi/cinder/nodeserver.go
```diff
@@ -286,12 +286,20 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
 }
 
 func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
-
 	nodeID, err := ns.Metadata.GetInstanceID()
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "[NodeGetInfo] unable to retrieve instance id of node %v", err)
 	}
 
+	nodeInfo := &csi.NodeGetInfoResponse{
+		NodeId:            nodeID,
+		MaxVolumesPerNode: ns.Opts.NodeVolumeAttachLimit,
+	}
+
+	if !ns.Driver.withTopology {
+		return nodeInfo, nil
+	}
+
 	zone, err := ns.Metadata.GetAvailabilityZone()
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "[NodeGetInfo] Unable to retrieve availability zone of node %v", err)
@@ -301,13 +309,9 @@ func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
 	for k, v := range ns.Topologies {
 		topologyMap[k] = v
 	}
-	topology := &csi.Topology{Segments: topologyMap}
+	nodeInfo.AccessibleTopology = &csi.Topology{Segments: topologyMap}
 
-	return &csi.NodeGetInfoResponse{
-		NodeId:             nodeID,
-		AccessibleTopology: topology,
-		MaxVolumesPerNode:  ns.Opts.NodeVolumeAttachLimit,
-	}, nil
+	return nodeInfo, nil
 }
 
 func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
```
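To make the effect concrete, these are the two response shapes `NodeGetInfo` can now return. The node ID, attach limit, and zone below are made-up sample values, and the zone topology key is assumed here rather than shown in the diff.

```go
package main

import (
	"fmt"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
	// With --with-topology=true (the default): the node advertises the
	// zone it lives in, so the scheduler can match volumes to nodes.
	withTopology := &csi.NodeGetInfoResponse{
		NodeId:            "instance-uuid", // sample value
		MaxVolumesPerNode: 256,             // sample value
		AccessibleTopology: &csi.Topology{
			Segments: map[string]string{
				"topology.cinder.csi.openstack.org/zone": "nova", // assumed key and zone
			},
		},
	}

	// With --with-topology=false: AccessibleTopology is omitted entirely,
	// so no AZ constraint is attached to the node.
	withoutTopology := &csi.NodeGetInfoResponse{
		NodeId:            "instance-uuid",
		MaxVolumesPerNode: 256,
	}

	fmt.Println(withTopology, withoutTopology)
}
```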
2 changes: 1 addition & 1 deletion pkg/csi/cinder/nodeserver_test.go
```diff
@@ -41,7 +41,7 @@ var omock *openstack.OpenStackMock
 func init() {
 	if fakeNs == nil {
 
-		d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster})
+		d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster, WithTopology: true})
 
 		// mock MountMock
 		mmock = new(mount.MountMock)
```
2 changes: 1 addition & 1 deletion tests/sanity/cinder/sanity_test.go
```diff
@@ -19,7 +19,7 @@ func TestDriver(t *testing.T) {
 	endpoint := "unix://" + socket
 	cluster := "kubernetes"
 
-	d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster})
+	d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster, WithTopology: true})
 
 	fakecloudprovider := getfakecloud()
 	openstack.OsInstances = map[string]openstack.IOpenStack{
```
