diff --git a/cmd/incus/project.go b/cmd/incus/project.go
index 223045b9f58..ad689739693 100644
--- a/cmd/incus/project.go
+++ b/cmd/incus/project.go
@@ -1108,9 +1108,11 @@ func (c *cmdProjectInfo) Run(cmd *cobra.Command, args []string) error {
 	byteLimits := []string{"disk", "memory"}
 	data := [][]string{}
 	for k, v := range projectState.Resources {
+		shortKey := strings.SplitN(k, ".", 2)[0]
+
 		limit := i18n.G("UNLIMITED")
 		if v.Limit >= 0 {
-			if slices.Contains(byteLimits, k) {
+			if slices.Contains(byteLimits, shortKey) {
 				limit = units.GetByteSizeStringIEC(v.Limit, 2)
 			} else {
 				limit = fmt.Sprintf("%d", v.Limit)
@@ -1118,13 +1120,19 @@ func (c *cmdProjectInfo) Run(cmd *cobra.Command, args []string) error {
 		}
 
 		usage := ""
-		if slices.Contains(byteLimits, k) {
+		if slices.Contains(byteLimits, shortKey) {
 			usage = units.GetByteSizeStringIEC(v.Usage, 2)
 		} else {
 			usage = fmt.Sprintf("%d", v.Usage)
 		}
 
-		data = append(data, []string{strings.ToUpper(k), limit, usage})
+		columnName := strings.ToUpper(k)
+		fields := strings.SplitN(columnName, ".", 2)
+		if len(fields) == 2 {
+			columnName = fmt.Sprintf("%s (%s)", fields[0], fields[1])
+		}
+
+		data = append(data, []string{columnName, limit, usage})
 	}
 
 	sort.Sort(cli.SortColumnsNaturally(data))
diff --git a/cmd/incusd/api_project.go b/cmd/incusd/api_project.go
index 2ed45434aaf..53111ce0ad4 100644
--- a/cmd/incusd/api_project.go
+++ b/cmd/incusd/api_project.go
@@ -1692,6 +1692,34 @@ func projectValidateConfig(s *state.State, config map[string]string) error {
 		"restricted.snapshots": isEitherAllowOrBlock,
 	}
 
+	// Add the storage pool keys.
+	err := s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error {
+		var err error
+
+		// Load all the pools.
+		pools, err := tx.GetStoragePoolNames(ctx)
+		if err != nil {
+			return err
+		}
+
+		// Add the storage-pool specific config keys.
+		for _, poolName := range pools {
+			// gendoc:generate(entity=project, group=limits, key=limits.disk.pool.POOL_NAME)
+			// This value is the maximum value of the aggregate disk
+			// space used by all instance volumes, custom volumes, and images of the
+			// project on this specific storage pool.
+			// ---
+			// type: string
+			// shortdesc: Maximum disk space used by the project on this pool
+			projectConfigKeys[fmt.Sprintf("limits.disk.pool.%s", poolName)] = validate.Optional(validate.IsSize)
+		}
+
+		return nil
+	})
+	if err != nil {
+		return fmt.Errorf("Failed loading storage pool names: %w", err)
+	}
+
 	for k, v := range config {
 		key := k
 
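Since the validation code above registers one `projectConfigKeys` entry per existing pool, a per-pool key is only accepted when it names a known pool, and its value must pass `validate.IsSize`. A minimal sketch of the resulting CLI behavior, assuming a project `p1` and a pool named `fast` (both hypothetical):

```sh
incus storage create fast dir

incus project set p1 limits.disk.pool.fast=20GiB    # known pool, valid size: accepted
incus project set p1 limits.disk.pool.missing=20GiB # no such pool: rejected as an invalid config key
incus project set p1 limits.disk.pool.fast=plenty   # fails validate.IsSize
```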
diff --git a/cmd/incusd/storage_pools.go b/cmd/incusd/storage_pools.go
index 06a76999223..d111ce441a7 100644
--- a/cmd/incusd/storage_pools.go
+++ b/cmd/incusd/storage_pools.go
@@ -146,13 +146,24 @@ func storagePoolsGet(d *Daemon, r *http.Request) response.Response {
 	recursion := localUtil.IsRecursionRequest(r)
 
 	var poolNames []string
+	var hiddenPoolNames []string
 
 	err := s.DB.Cluster.Transaction(r.Context(), func(ctx context.Context, tx *db.ClusterTx) error {
 		var err error
 
+		// Load the pool names.
 		poolNames, err = tx.GetStoragePoolNames(ctx)
+		if err != nil {
+			return err
+		}
 
-		return err
+		// Load the project limits.
+		hiddenPoolNames, err = project.HiddenStoragePools(ctx, tx, request.ProjectParam(r))
+		if err != nil {
+			return err
+		}
+
+		return nil
 	})
 	if err != nil && !response.IsNotFoundError(err) {
 		return response.SmartError(err)
@@ -166,6 +177,11 @@ func storagePoolsGet(d *Daemon, r *http.Request) response.Response {
 	resultString := []string{}
 	resultMap := []api.StoragePool{}
 	for _, poolName := range poolNames {
+		// Hide storage pools with a 0 project limit.
+		if slices.Contains(hiddenPoolNames, poolName) {
+			continue
+		}
+
 		if !recursion {
 			resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, poolName))
 		} else {
@@ -638,6 +654,27 @@ func storagePoolGet(d *Daemon, r *http.Request) response.Response {
 		memberSpecific = true
 	}
 
+	var hiddenPoolNames []string
+	err = s.DB.Cluster.Transaction(r.Context(), func(ctx context.Context, tx *db.ClusterTx) error {
+		var err error
+
+		// Load the project limits.
+		hiddenPoolNames, err = project.HiddenStoragePools(ctx, tx, request.ProjectParam(r))
+		if err != nil {
+			return err
+		}
+
+		return nil
+	})
+	if err != nil {
+		return response.SmartError(err)
+	}
+
+	// Hide storage pools with a 0 project limit.
+	if slices.Contains(hiddenPoolNames, poolName) {
+		return response.NotFound(nil)
+	}
+
 	// Get the existing storage pool.
 	pool, err := storagePools.LoadByName(s, poolName)
 	if err != nil {
diff --git a/cmd/incusd/storage_volumes.go b/cmd/incusd/storage_volumes.go
index f0835de3998..f3e8ab86df6 100644
--- a/cmd/incusd/storage_volumes.go
+++ b/cmd/incusd/storage_volumes.go
@@ -707,7 +707,7 @@ func storagePoolVolumesPost(d *Daemon, r *http.Request) response.Response {
 			return err
 		}
 
-		err = project.AllowVolumeCreation(tx, projectName, req)
+		err = project.AllowVolumeCreation(tx, projectName, poolName, req)
 		if err != nil {
 			return err
 		}
diff --git a/doc/api-extensions.md b/doc/api-extensions.md
index 71dd5339a93..9f24b640d89 100644
--- a/doc/api-extensions.md
+++ b/doc/api-extensions.md
@@ -2546,3 +2546,7 @@ The new configuration keys are:
 ## `disk_volume_subpath`
 
 This introduces the ability to access the sub-path of a file system custom volume by using the `source=volume/path` syntax.
+
+## `projects_limits_disk_pool`
+
+This introduces per-pool project disk limits, adding a `limits.disk.pool.NAME` configuration option to the project limits.
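As implemented in the `storagePoolsGet` and `storagePoolGet` changes above, a per-pool limit of `0` hides the pool from the project entirely. With the same hypothetical names:

```sh
incus project set p1 limits.disk.pool.reserved=0

incus --project p1 storage list          # "reserved" is filtered out of the listing
incus --project p1 storage show reserved # now returns a 404 Not Found
```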
diff --git a/doc/config_options.txt b/doc/config_options.txt
index f455b04361b..870bfa95b1e 100644
--- a/doc/config_options.txt
+++ b/doc/config_options.txt
@@ -1640,6 +1640,14 @@ This value is the maximum value for the sum of the individual {config:option}`in
 This value is the maximum value of the aggregate disk space used by all instance volumes, custom volumes, and images of the project.
 ```
 
+```{config:option} limits.disk.pool.POOL_NAME project-limits
+:shortdesc: "Maximum disk space used by the project on this pool"
+:type: "string"
+This value is the maximum value of the aggregate disk
+space used by all instance volumes, custom volumes, and images of the
+project on this specific storage pool.
+```
+
 ```{config:option} limits.instances project-limits
 :shortdesc: "Maximum number of instances that can be created in the project"
 :type: "integer"
diff --git a/internal/server/metadata/configuration.json b/internal/server/metadata/configuration.json
index da5c80d68c5..7f9dd8da5d6 100644
--- a/internal/server/metadata/configuration.json
+++ b/internal/server/metadata/configuration.json
@@ -1818,6 +1818,13 @@
 						"type": "string"
 					}
 				},
+				{
+					"limits.disk.pool.POOL_NAME": {
+						"longdesc": "This value is the maximum value of the aggregate disk\nspace used by all instance volumes, custom volumes, and images of the\nproject on this specific storage pool.",
+						"shortdesc": "Maximum disk space used by the project on this pool",
+						"type": "string"
+					}
+				},
 				{
 					"limits.instances": {
 						"longdesc": "",
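Per the documentation added above, the per-pool keys complement rather than replace the aggregate `limits.disk`; both caps apply independently. A sketch (names and sizes hypothetical):

```sh
incus project set p1 limits.disk=50GiB           # cap across all pools
incus project set p1 limits.disk.pool.fast=10GiB # tighter cap on one pool

# Fails against the 10GiB pool cap even though the
# project-wide 50GiB cap still has plenty of room.
incus --project p1 storage volume create fast big size=20GiB
```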
diff --git a/internal/server/project/permissions.go b/internal/server/project/permissions.go
index 608f5f4359c..37ba737ea07 100644
--- a/internal/server/project/permissions.go
+++ b/internal/server/project/permissions.go
@@ -21,6 +21,33 @@ import (
 	"github.com/lxc/incus/v6/shared/util"
 )
 
+// HiddenStoragePools returns a list of storage pools that should be hidden from users of the project.
+func HiddenStoragePools(ctx context.Context, tx *db.ClusterTx, projectName string) ([]string, error) {
+	dbProject, err := cluster.GetProject(ctx, tx.Tx(), projectName)
+	if err != nil {
+		return nil, fmt.Errorf("Failed getting project: %w", err)
+	}
+
+	project, err := dbProject.ToAPI(ctx, tx.Tx())
+	if err != nil {
+		return nil, err
+	}
+
+	hiddenPools := []string{}
+	for k, v := range project.Config {
+		if !strings.HasPrefix(k, projectLimitDiskPool) || v != "0" {
+			continue
+		}
+
+		fields := strings.SplitN(k, projectLimitDiskPool, 2)
+		if len(fields) == 2 {
+			hiddenPools = append(hiddenPools, fields[1])
+		}
+	}
+
+	return hiddenPools, nil
+}
+
 // AllowInstanceCreation returns an error if any project-specific limit or
 // restriction is violated when creating a new instance.
 func AllowInstanceCreation(tx *db.ClusterTx, projectName string, req api.InstancesPost) error {
@@ -226,7 +253,7 @@ func checkRestrictionsOnVolatileConfig(project api.Project, instanceType instanc
 
 // AllowVolumeCreation returns an error if any project-specific limit or
 // restriction is violated when creating a new custom volume in a project.
-func AllowVolumeCreation(tx *db.ClusterTx, projectName string, req api.StorageVolumesPost) error {
+func AllowVolumeCreation(tx *db.ClusterTx, projectName string, poolName string, req api.StorageVolumesPost) error {
 	info, err := fetchProject(tx, projectName, true)
 	if err != nil {
 		return err
@@ -243,8 +270,9 @@ func AllowVolumeCreation(tx *db.ClusterTx, projectName string, req api.StorageVo
 
 	// Add the volume being created.
 	info.Volumes = append(info.Volumes, db.StorageVolumeArgs{
-		Name:   req.Name,
-		Config: req.Config,
+		Name:     req.Name,
+		Config:   req.Config,
+		PoolName: poolName,
 	})
 
 	err = checkRestrictionsAndAggregateLimits(tx, info)
@@ -311,8 +339,9 @@ func checkRestrictionsAndAggregateLimits(tx *db.ClusterTx, info *projectInfo) er
 	// across all project instances.
 	aggregateKeys := []string{}
 	isRestricted := false
+
 	for key, value := range info.Project.Config {
-		if slices.Contains(allAggregateLimits, key) {
+		if slices.Contains(allAggregateLimits, key) || strings.HasPrefix(key, projectLimitDiskPool) {
 			aggregateKeys = append(aggregateKeys, key)
 			continue
 		}
@@ -365,7 +394,14 @@ func getAggregateLimits(info *projectInfo, aggregateKeys []string) (map[string]a
 		max := int64(-1)
 		limit := info.Project.Config[key]
 		if limit != "" {
-			parser := aggregateLimitConfigValueParsers[key]
+			keyName := key
+
+			// Handle pool-specific limits.
+			if strings.HasPrefix(key, projectLimitDiskPool) {
+				keyName = "limits.disk"
+			}
+
+			parser := aggregateLimitConfigValueParsers[keyName]
 			max, err = parser(info.Project.Config[key])
 			if err != nil {
 				return nil, err
@@ -394,7 +430,14 @@ func checkAggregateLimits(info *projectInfo, aggregateKeys []string) error {
 	}
 
 	for _, key := range aggregateKeys {
-		parser := aggregateLimitConfigValueParsers[key]
+		keyName := key
+
+		// Handle pool-specific limits.
+		if strings.HasPrefix(key, projectLimitDiskPool) {
+			keyName = "limits.disk"
+		}
+
+		parser := aggregateLimitConfigValueParsers[keyName]
 		max, err := parser(info.Project.Config[key])
 		if err != nil {
 			return err
@@ -404,6 +447,7 @@ func checkAggregateLimits(info *projectInfo, aggregateKeys []string) error {
 			return fmt.Errorf("Reached maximum aggregate value %q for %q in project %q", info.Project.Config[key], key, info.Project.Name)
 		}
 	}
+
 	return nil
 }
 
@@ -1115,7 +1159,14 @@ func validateAggregateLimit(totals map[string]int64, key, value string) error {
 		return nil
 	}
 
-	parser := aggregateLimitConfigValueParsers[key]
+	keyName := key
+
+	// Handle pool-specific limits.
+	if strings.HasPrefix(key, projectLimitDiskPool) {
+		keyName = "limits.disk"
+	}
+
+	parser := aggregateLimitConfigValueParsers[keyName]
 	limit, err := parser(value)
 	if err != nil {
 		return fmt.Errorf("Invalid value %q for limit %q: %w", value, key, err)
@@ -1123,7 +1174,14 @@
 
 	total := totals[key]
 	if limit < total {
-		printer := aggregateLimitConfigValuePrinters[key]
+		keyName := key
+
+		// Handle pool-specific limits.
+		if strings.HasPrefix(key, projectLimitDiskPool) {
+			keyName = "limits.disk"
+		}
+
+		printer := aggregateLimitConfigValuePrinters[keyName]
 		return fmt.Errorf("%q is too low: current total is %q", key, printer(total))
 	}
 
@@ -1267,8 +1325,18 @@ func getTotalsAcrossProjectEntities(info *projectInfo, keys []string, skipUnset
 	for _, key := range keys {
 		totals[key] = 0
 
-		if key == "limits.disk" {
+		if key == "limits.disk" || strings.HasPrefix(key, projectLimitDiskPool) {
+			poolName := ""
+			fields := strings.SplitN(key, projectLimitDiskPool, 2)
+			if len(fields) == 2 {
+				poolName = fields[1]
+			}
+
 			for _, volume := range info.Volumes {
+				if poolName != "" && volume.PoolName != poolName {
+					continue
+				}
+
 				value, ok := volume.Config["size"]
 				if !ok {
 					if skipUnset {
@@ -1309,14 +1377,31 @@ func getInstanceLimits(inst api.Instance, keys []string, skipUnset bool) (map[st
 	for _, key := range keys {
 		var limit int64
 
-		parser := aggregateLimitConfigValueParsers[key]
+		keyName := key
+
+		// Handle pool-specific limits.
+		if strings.HasPrefix(key, projectLimitDiskPool) {
+			keyName = "limits.disk"
+		}
+
+		parser := aggregateLimitConfigValueParsers[keyName]
+
+		if key == "limits.disk" || strings.HasPrefix(key, projectLimitDiskPool) {
+			poolName := ""
+			fields := strings.SplitN(key, projectLimitDiskPool, 2)
+			if len(fields) == 2 {
+				poolName = fields[1]
+			}
 
-		if key == "limits.disk" {
 			_, device, err := instance.GetRootDiskDevice(inst.Devices)
 			if err != nil {
 				return nil, fmt.Errorf("Failed getting root disk device for instance %q in project %q: %w", inst.Name, inst.Project, err)
 			}
 
+			if poolName != "" && device["pool"] != poolName {
+				continue
+			}
+
 			value, ok := device["size"]
 			if !ok || value == "" {
 				if skipUnset {
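Note how `getTotalsAcrossProjectEntities` and `getInstanceLimits` above only count a volume or root disk against a per-pool key when its pool matches. A sketch of the resulting behavior, assuming hypothetical pools `fast` and `slow` and an Alpine image:

```sh
incus project set p1 limits.disk.pool.fast=10GiB

# Root disk lands on "fast", so its 4GiB size counts against the cap.
incus launch images:alpine/3.20 c1 --project p1 -s fast -d root,size=4GiB

# Root disk on "slow" is ignored by limits.disk.pool.fast.
incus launch images:alpine/3.20 c2 --project p1 -s slow -d root,size=8GiB
```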
diff --git a/internal/server/project/project.go b/internal/server/project/project.go
index 28e7ae344f0..d58f66b327f 100644
--- a/internal/server/project/project.go
+++ b/internal/server/project/project.go
@@ -15,6 +15,9 @@ import (
 // separator is used to delimit the project name from the suffix.
 const separator = "_"
 
+// projectLimitDiskPool is the prefix used for pool-specific disk limits.
+var projectLimitDiskPool = "limits.disk.pool."
+
 // Instance adds the "_" prefix to instance name when the given project name is not "default".
 func Instance(projectName string, instanceName string) string {
 	if projectName != api.ProjectDefaultName {
diff --git a/internal/server/project/state.go b/internal/server/project/state.go
index c1031516278..86e24415c3c 100644
--- a/internal/server/project/state.go
+++ b/internal/server/project/state.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"strconv"
+	"strings"
 
 	"github.com/lxc/incus/v6/internal/server/db"
 	"github.com/lxc/incus/v6/internal/server/instance/instancetype"
@@ -29,6 +30,16 @@ func GetCurrentAllocations(ctx context.Context, tx *db.ClusterTx, projectName st
 		return nil, err
 	}
 
+	// Get per-pool limits.
+	poolLimits := []string{}
+	for k := range info.Project.Config {
+		if strings.HasPrefix(k, projectLimitDiskPool) {
+			poolLimits = append(poolLimits, k)
+		}
+	}
+
+	allAggregateLimits := append(allAggregateLimits, poolLimits...)
+
 	// Get the instance aggregated values.
 	raw, err := getAggregateLimits(info, allAggregateLimits)
 	if err != nil {
@@ -41,6 +52,13 @@ func GetCurrentAllocations(ctx context.Context, tx *db.ClusterTx, projectName st
 	result["networks"] = raw["limits.networks"]
 	result["processes"] = raw["limits.processes"]
 
+	// Add the pool-specific disk limits.
+	for k, v := range raw {
+		if strings.HasPrefix(k, projectLimitDiskPool) && v.Limit > 0 {
+			result[fmt.Sprintf("disk.%s", strings.SplitN(k, ".", 4)[3])] = v
+		}
+	}
+
 	// Get the instance count values.
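With `GetCurrentAllocations` now emitting `disk.POOL_NAME` entries, the client changes in `cmd/incus/project.go` render each one as an extra `DISK (POOL_NAME)` row. Illustrative output only; the exact values are invented:

```sh
incus project info p1
# +-------------+----------+----------+
# |  RESOURCE   |  LIMIT   |  USAGE   |
# +-------------+----------+----------+
# | DISK        | 50.00GiB | 12.00GiB |
# | DISK (FAST) | 10.00GiB | 4.00GiB  |
# | ...         |          |          |
```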
 	count, limit, err := getTotalInstanceCountLimit(info)
 	if err != nil {
diff --git a/internal/server/storage/backend.go b/internal/server/storage/backend.go
index 5bf2c807d57..d034794296b 100644
--- a/internal/server/storage/backend.go
+++ b/internal/server/storage/backend.go
@@ -7006,7 +7006,7 @@ func (b *backend) CreateCustomVolumeFromISO(projectName string, volName string,
 	}
 
 	err := b.state.DB.Cluster.Transaction(b.state.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error {
-		return project.AllowVolumeCreation(tx, projectName, req)
+		return project.AllowVolumeCreation(tx, projectName, b.name, req)
 	})
 	if err != nil {
 		return fmt.Errorf("Failed checking volume creation allowed: %w", err)
@@ -7096,7 +7096,7 @@ func (b *backend) CreateCustomVolumeFromBackup(srcBackup backup.Info, srcData io
 	}
 
 	err := b.state.DB.Cluster.Transaction(b.state.ShutdownCtx, func(ctx context.Context, tx *db.ClusterTx) error {
-		return project.AllowVolumeCreation(tx, srcBackup.Project, req)
+		return project.AllowVolumeCreation(tx, srcBackup.Project, b.name, req)
 	})
 	if err != nil {
 		return fmt.Errorf("Failed checking volume creation allowed: %w", err)
diff --git a/internal/version/api.go b/internal/version/api.go
index 3d260553ca4..314c6ce7f8a 100644
--- a/internal/version/api.go
+++ b/internal/version/api.go
@@ -431,6 +431,7 @@ var APIExtensions = []string{
 	"instances_lxcfs_per_instance",
 	"clustering_groups_vm_cpu_definition",
 	"disk_volume_subpath",
+	"projects_limits_disk_pool",
 }
 
 // APIExtensionsCount returns the number of available API extensions.
diff --git a/test/suites/projects.sh b/test/suites/projects.sh
index 463444a9152..b52dd7a488c 100644
--- a/test/suites/projects.sh
+++ b/test/suites/projects.sh
@@ -550,6 +550,43 @@ test_projects_limits() {
 
   deps/import-busybox --project p1 --alias testimage
 
+  # Test per-pool limits.
+  incus storage create limit1 dir
+  incus storage create limit2 dir
+
+  incus project set p1 limits.disk=50MiB
+  incus project set p1 limits.disk.pool.limit1=0
+  incus project set p1 limits.disk.pool.limit2=0
+
+  ! incus storage list | grep -q limit1 || false
+  ! incus storage list | grep -q limit2 || false
+
+  incus storage volume create "${pool}" foo size=10MiB
+  ! incus storage volume create "${pool}" bar size=50MiB || false
+  incus storage volume delete "${pool}" foo
+
+  ! incus storage volume create limit1 foo size=10GiB || false
+  ! incus storage volume create limit2 foo size=10GiB || false
+
+  incus project set p1 limits.disk.pool.limit1=10MiB
+  incus project set p1 limits.disk.pool.limit2=10MiB
+  incus storage volume create limit1 foo size=10MiB
+  ! incus storage volume create limit1 bar size=10MiB || false
+  incus storage volume create limit2 foo size=10MiB
+  ! incus storage volume create limit2 bar size=10MiB || false
+
+  ! incus storage volume create "${pool}" foo size=40MiB || false
+  incus storage volume delete limit1 foo
+  incus storage volume delete limit2 foo
+  incus storage volume create "${pool}" foo size=40MiB
+
+  incus storage volume delete "${pool}" foo
+  incus project unset p1 limits.disk.pool.limit1
+  incus project unset p1 limits.disk.pool.limit2
+  incus project unset p1 limits.disk
+  incus storage delete limit1
+  incus storage delete limit2
+
   # Create a couple of containers in the project.
   incus init testimage c1
   incus init testimage c2
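Clients can gate on the feature via the new API extension; something like the following check should work, assuming `jq` is installed:

```sh
incus query /1.0 | jq -r '.api_extensions[]' | grep -xq projects_limits_disk_pool && echo supported
```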