Merge pull request #1098 from stgraber/main
tych0 authored Aug 9, 2024
2 parents 625583e + 8d846de commit 55bd006
Showing 13 changed files with 254 additions and 18 deletions.
14 changes: 11 additions & 3 deletions cmd/incus/project.go
@@ -1108,23 +1108,31 @@ func (c *cmdProjectInfo) Run(cmd *cobra.Command, args []string) error {
byteLimits := []string{"disk", "memory"}
data := [][]string{}
for k, v := range projectState.Resources {
shortKey := strings.SplitN(k, ".", 2)[0]

limit := i18n.G("UNLIMITED")
if v.Limit >= 0 {
if slices.Contains(byteLimits, k) {
if slices.Contains(byteLimits, shortKey) {
limit = units.GetByteSizeStringIEC(v.Limit, 2)
} else {
limit = fmt.Sprintf("%d", v.Limit)
}
}

usage := ""
if slices.Contains(byteLimits, k) {
if slices.Contains(byteLimits, shortKey) {
usage = units.GetByteSizeStringIEC(v.Usage, 2)
} else {
usage = fmt.Sprintf("%d", v.Usage)
}

data = append(data, []string{strings.ToUpper(k), limit, usage})
columnName := strings.ToUpper(k)
fields := strings.SplitN(columnName, ".", 2)
if len(fields) == 2 {
columnName = fmt.Sprintf("%s (%s)", fields[0], fields[1])
}

data = append(data, []string{columnName, limit, usage})
}

sort.Sort(cli.SortColumnsNaturally(data))
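The CLI change above renames pool-scoped resource rows so the `incus project info` table stays readable: everything after the first dot in the key moves into parentheses. A minimal, self-contained sketch of that formatting, assuming the server reports pool-scoped disk usage under keys such as `disk.pool.default` (the pool name is a placeholder):

```go
package main

import (
	"fmt"
	"strings"
)

// formatResourceColumn mirrors the column-name logic from cmd/incus/project.go:
// the first dot-separated segment becomes the column name and any remainder is
// shown in parentheses.
func formatResourceColumn(key string) string {
	name := strings.ToUpper(key)

	fields := strings.SplitN(name, ".", 2)
	if len(fields) == 2 {
		name = fmt.Sprintf("%s (%s)", fields[0], fields[1])
	}

	return name
}

func main() {
	// "disk.pool.default" is a hypothetical pool-scoped key.
	for _, key := range []string{"memory", "disk", "disk.pool.default"} {
		fmt.Println(formatResourceColumn(key)) // MEMORY, DISK, DISK (POOL.DEFAULT)
	}
}
```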
28 changes: 28 additions & 0 deletions cmd/incusd/api_project.go
@@ -1692,6 +1692,34 @@ func projectValidateConfig(s *state.State, config map[string]string) error {
"restricted.snapshots": isEitherAllowOrBlock,
}

// Add the storage pool keys.
err := s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error {
var err error

// Load all the pools.
pools, err := tx.GetStoragePoolNames(ctx)
if err != nil {
return err
}

// Add the storage-pool specific config keys.
for _, poolName := range pools {
// gendoc:generate(entity=project, group=limits, key=limits.disk.pool.POOL_NAME)
// This value is the maximum value of the aggregate disk
// space used by all instance volumes, custom volumes, and images of the
// project on this specific storage pool.
// ---
// type: string
// shortdesc: Maximum disk space used by the project on this pool
projectConfigKeys[fmt.Sprintf("limits.disk.pool.%s", poolName)] = validate.Optional(validate.IsSize)
}

return nil
})
if err != nil {
return fmt.Errorf("Failed loading storage pool names: %w", err)
}

for k, v := range config {
key := k

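The daemon-side validation above generates one `limits.disk.pool.<pool>` key per existing storage pool at validation time instead of hard-coding the key list. A rough sketch of that pattern, with the database lookup and the shared `validate.Optional(validate.IsSize)` helper replaced by placeholders (the pool names and the size regex are illustrative only):

```go
package main

import (
	"fmt"
	"regexp"
)

// sizeRe is a simplified stand-in for the daemon's size validator; the real
// code uses validate.Optional(validate.IsSize) from the shared validate package.
var sizeRe = regexp.MustCompile(`^[0-9]+\s*([KMGTPE]i?B)?$`)

func validateSize(value string) error {
	if value == "" || sizeRe.MatchString(value) {
		return nil
	}

	return fmt.Errorf("Invalid size value %q", value)
}

func main() {
	// Hypothetical pool names; the daemon loads these inside a cluster transaction.
	pools := []string{"default", "fast"}

	// Register one pool-specific config key per pool, as the diff above does.
	projectConfigKeys := map[string]func(string) error{}
	for _, poolName := range pools {
		projectConfigKeys[fmt.Sprintf("limits.disk.pool.%s", poolName)] = validateSize
	}

	for key, validator := range projectConfigKeys {
		fmt.Println(key, validator("50GiB")) // limits.disk.pool.default <nil>, ...
	}
}
```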
39 changes: 38 additions & 1 deletion cmd/incusd/storage_pools.go
@@ -146,13 +146,24 @@ func storagePoolsGet(d *Daemon, r *http.Request) response.Response {
recursion := localUtil.IsRecursionRequest(r)

var poolNames []string
var hiddenPoolNames []string

err := s.DB.Cluster.Transaction(r.Context(), func(ctx context.Context, tx *db.ClusterTx) error {
var err error

// Load the pool names.
poolNames, err = tx.GetStoragePoolNames(ctx)
if err != nil {
return err
}

return err
// Load the project limits.
hiddenPoolNames, err = project.HiddenStoragePools(ctx, tx, request.ProjectParam(r))
if err != nil {
return err
}

return nil
})
if err != nil && !response.IsNotFoundError(err) {
return response.SmartError(err)
@@ -166,6 +177,11 @@ func storagePoolsGet(d *Daemon, r *http.Request) response.Response {
resultString := []string{}
resultMap := []api.StoragePool{}
for _, poolName := range poolNames {
// Hide storage pools with a 0 project limit.
if slices.Contains(hiddenPoolNames, poolName) {
continue
}

if !recursion {
resultString = append(resultString, fmt.Sprintf("/%s/storage-pools/%s", version.APIVersion, poolName))
} else {
@@ -638,6 +654,27 @@ func storagePoolGet(d *Daemon, r *http.Request) response.Response {
memberSpecific = true
}

var hiddenPoolNames []string
err = s.DB.Cluster.Transaction(r.Context(), func(ctx context.Context, tx *db.ClusterTx) error {
var err error

// Load the project limits.
hiddenPoolNames, err = project.HiddenStoragePools(ctx, tx, request.ProjectParam(r))
if err != nil {
return err
}

return nil
})
if err != nil {
return response.SmartError(err)
}

// Hide storage pools with a 0 project limit.
if slices.Contains(hiddenPoolNames, poolName) {
return response.NotFound(nil)
}

// Get the existing storage pool.
pool, err := storagePools.LoadByName(s, poolName)
if err != nil {
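Both handlers above apply the same rule: a pool whose project limit is set to `0` is omitted from listings and returns 404 when fetched directly. A small sketch of the listing filter, with hypothetical pool names standing in for the database lookups:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	// Hypothetical data; the daemon loads both lists inside a cluster transaction.
	poolNames := []string{"default", "fast", "archive"}

	// Pools with limits.disk.pool.<name>=0 in the request's project.
	hiddenPoolNames := []string{"archive"}

	visible := []string{}
	for _, poolName := range poolNames {
		// Hide storage pools with a 0 project limit.
		if slices.Contains(hiddenPoolNames, poolName) {
			continue
		}

		visible = append(visible, poolName)
	}

	fmt.Println(visible) // [default fast]
}
```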
2 changes: 1 addition & 1 deletion cmd/incusd/storage_volumes.go
@@ -707,7 +707,7 @@ func storagePoolVolumesPost(d *Daemon, r *http.Request) response.Response {
return err
}

err = project.AllowVolumeCreation(tx, projectName, req)
err = project.AllowVolumeCreation(tx, projectName, poolName, req)
if err != nil {
return err
}
4 changes: 4 additions & 0 deletions doc/api-extensions.md
@@ -2546,3 +2546,7 @@ The new configuration keys are:
## `disk_volume_subpath`

This introduces the ability to access the sub-path of a file system custom volume by using the `source=volume/path` syntax.

## `projects_limits_disk_pool`

This introduces per-pool project disk limits through a new `limits.disk.pool.NAME` configuration option in the project limits.
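As a usage sketch only: the snippet below assumes the Incus Go client (`github.com/lxc/incus/v6/client`) exposes `ConnectIncusUnix`, `GetProject` and `UpdateProject` with the signatures shown; the project name, pool name, and size value are placeholders.

```go
package main

import (
	"log"

	incus "github.com/lxc/incus/v6/client"
)

func main() {
	// Connect over the local Unix socket (assumed default path).
	c, err := incus.ConnectIncusUnix("", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Fetch the project, set the per-pool limit, and write it back.
	project, etag, err := c.GetProject("demo")
	if err != nil {
		log.Fatal(err)
	}

	project.Config["limits.disk.pool.default"] = "50GiB"

	err = c.UpdateProject("demo", project.Writable(), etag)
	if err != nil {
		log.Fatal(err)
	}
}
```

The CLI equivalent would presumably be `incus project set <project> limits.disk.pool.<pool>=<size>`.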
8 changes: 8 additions & 0 deletions doc/config_options.txt
@@ -1640,6 +1640,14 @@ This value is the maximum value for the sum of the individual {config:option}`in
This value is the maximum value of the aggregate disk space used by all instance volumes, custom volumes, and images of the project.
```

```{config:option} limits.disk.pool.POOL_NAME project-limits
:shortdesc: "Maximum disk space used by the project on this pool"
:type: "string"
This value is the maximum value of the aggregate disk
space used by all instance volumes, custom volumes, and images of the
project on this specific storage pool.
```

```{config:option} limits.instances project-limits
:shortdesc: "Maximum number of instances that can be created in the project"
:type: "integer"
7 changes: 7 additions & 0 deletions internal/server/metadata/configuration.json
@@ -1818,6 +1818,13 @@
"type": "string"
}
},
{
"limits.disk.pool.POOL_NAME": {
"longdesc": "This value is the maximum value of the aggregate disk\nspace used by all instance volumes, custom volumes, and images of the\nproject on this specific storage pool.",
"shortdesc": "Maximum disk space used by the project on this pool",
"type": "string"
}
},
{
"limits.instances": {
"longdesc": "",
107 changes: 96 additions & 11 deletions internal/server/project/permissions.go
@@ -21,6 +21,33 @@ import (
"github.com/lxc/incus/v6/shared/util"
)

// HiddenStoragePools returns a list of storage pools that should be hidden from users of the project.
func HiddenStoragePools(ctx context.Context, tx *db.ClusterTx, projectName string) ([]string, error) {
dbProject, err := cluster.GetProject(ctx, tx.Tx(), projectName)
if err != nil {
return nil, fmt.Errorf("Failed getting project: %w", err)
}

project, err := dbProject.ToAPI(ctx, tx.Tx())
if err != nil {
return nil, err
}

hiddenPools := []string{}
for k, v := range project.Config {
if !strings.HasPrefix(k, projectLimitDiskPool) || v != "0" {
continue
}

fields := strings.SplitN(k, projectLimitDiskPool, 2)
if len(fields) == 2 {
hiddenPools = append(hiddenPools, fields[1])
}
}

return hiddenPools, nil
}

// AllowInstanceCreation returns an error if any project-specific limit or
// restriction is violated when creating a new instance.
func AllowInstanceCreation(tx *db.ClusterTx, projectName string, req api.InstancesPost) error {
@@ -226,7 +253,7 @@ func checkRestrictionsOnVolatileConfig(project api.Project, instanceType instanc

// AllowVolumeCreation returns an error if any project-specific limit or
// restriction is violated when creating a new custom volume in a project.
func AllowVolumeCreation(tx *db.ClusterTx, projectName string, req api.StorageVolumesPost) error {
func AllowVolumeCreation(tx *db.ClusterTx, projectName string, poolName string, req api.StorageVolumesPost) error {
info, err := fetchProject(tx, projectName, true)
if err != nil {
return err
@@ -243,8 +270,9 @@ func AllowVolumeCreation(tx *db.ClusterTx, projectName string, req api.StorageVo

// Add the volume being created.
info.Volumes = append(info.Volumes, db.StorageVolumeArgs{
Name: req.Name,
Config: req.Config,
Name: req.Name,
Config: req.Config,
PoolName: poolName,
})

err = checkRestrictionsAndAggregateLimits(tx, info)
@@ -311,8 +339,9 @@ func checkRestrictionsAndAggregateLimits(tx *db.ClusterTx, info *projectInfo) er
// across all project instances.
aggregateKeys := []string{}
isRestricted := false

for key, value := range info.Project.Config {
if slices.Contains(allAggregateLimits, key) {
if slices.Contains(allAggregateLimits, key) || strings.HasPrefix(key, projectLimitDiskPool) {
aggregateKeys = append(aggregateKeys, key)
continue
}
@@ -365,7 +394,14 @@ func getAggregateLimits(info *projectInfo, aggregateKeys []string) (map[string]a
max := int64(-1)
limit := info.Project.Config[key]
if limit != "" {
parser := aggregateLimitConfigValueParsers[key]
keyName := key

// Handle pool-specific limits.
if strings.HasPrefix(key, projectLimitDiskPool) {
keyName = "limits.disk"
}

parser := aggregateLimitConfigValueParsers[keyName]
max, err = parser(info.Project.Config[key])
if err != nil {
return nil, err
@@ -394,7 +430,14 @@ func checkAggregateLimits(info *projectInfo, aggregateKeys []string) error {
}

for _, key := range aggregateKeys {
parser := aggregateLimitConfigValueParsers[key]
keyName := key

// Handle pool-specific limits.
if strings.HasPrefix(key, projectLimitDiskPool) {
keyName = "limits.disk"
}

parser := aggregateLimitConfigValueParsers[keyName]
max, err := parser(info.Project.Config[key])
if err != nil {
return err
@@ -404,6 +447,7 @@ func checkAggregateLimits(info *projectInfo, aggregateKeys []string) error {
return fmt.Errorf("Reached maximum aggregate value %q for %q in project %q", info.Project.Config[key], key, info.Project.Name)
}
}

return nil
}

@@ -1115,15 +1159,29 @@ func validateAggregateLimit(totals map[string]int64, key, value string) error {
return nil
}

parser := aggregateLimitConfigValueParsers[key]
keyName := key

// Handle pool-specific limits.
if strings.HasPrefix(key, projectLimitDiskPool) {
keyName = "limits.disk"
}

parser := aggregateLimitConfigValueParsers[keyName]
limit, err := parser(value)
if err != nil {
return fmt.Errorf("Invalid value %q for limit %q: %w", value, key, err)
}

total := totals[key]
if limit < total {
printer := aggregateLimitConfigValuePrinters[key]
keyName := key

// Handle pool-specific limits.
if strings.HasPrefix(key, projectLimitDiskPool) {
keyName = "limits.disk"
}

printer := aggregateLimitConfigValuePrinters[keyName]
return fmt.Errorf("%q is too low: current total is %q", key, printer(total))
}

@@ -1267,8 +1325,18 @@ func getTotalsAcrossProjectEntities(info *projectInfo, keys []string, skipUnset

for _, key := range keys {
totals[key] = 0
if key == "limits.disk" {
if key == "limits.disk" || strings.HasPrefix(key, projectLimitDiskPool) {
poolName := ""
fields := strings.SplitN(key, projectLimitDiskPool, 2)
if len(fields) == 2 {
poolName = fields[1]
}

for _, volume := range info.Volumes {
if poolName != "" && volume.PoolName != poolName {
continue
}

value, ok := volume.Config["size"]
if !ok {
if skipUnset {
@@ -1309,14 +1377,31 @@ func getInstanceLimits(inst api.Instance, keys []string, skipUnset bool) (map[st

for _, key := range keys {
var limit int64
parser := aggregateLimitConfigValueParsers[key]
keyName := key

// Handle pool-specific limits.
if strings.HasPrefix(key, projectLimitDiskPool) {
keyName = "limits.disk"
}

parser := aggregateLimitConfigValueParsers[keyName]

if key == "limits.disk" || strings.HasPrefix(key, projectLimitDiskPool) {
poolName := ""
fields := strings.SplitN(key, projectLimitDiskPool, 2)
if len(fields) == 2 {
poolName = fields[1]
}

if key == "limits.disk" {
_, device, err := instance.GetRootDiskDevice(inst.Devices)
if err != nil {
return nil, fmt.Errorf("Failed getting root disk device for instance %q in project %q: %w", inst.Name, inst.Project, err)
}

if poolName != "" && device["pool"] != poolName {
continue
}

value, ok := device["size"]
if !ok || value == "" {
if skipUnset {
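Most of the `permissions.go` changes above repeat one idea: a key carrying the `limits.disk.pool.` prefix is parsed and printed with the regular `limits.disk` handlers, but its total only counts usage on the named pool. A condensed sketch of that aggregation, using raw byte counts where the real code parses size strings from volume config (the volumes and pool names are hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// projectLimitDiskPool mirrors the prefix constant added in project.go.
const projectLimitDiskPool = "limits.disk.pool."

// volume is a stripped-down stand-in for db.StorageVolumeArgs.
type volume struct {
	PoolName string
	Size     int64 // bytes; the real code parses the "size" config string
}

// diskTotal sums volume sizes for a limit key, restricting the sum to one
// pool when the key carries the limits.disk.pool. prefix.
func diskTotal(key string, volumes []volume) int64 {
	poolName := ""
	fields := strings.SplitN(key, projectLimitDiskPool, 2)
	if len(fields) == 2 {
		poolName = fields[1]
	}

	var total int64
	for _, vol := range volumes {
		if poolName != "" && vol.PoolName != poolName {
			continue
		}

		total += vol.Size
	}

	return total
}

func main() {
	// Hypothetical volumes on two pools.
	volumes := []volume{
		{PoolName: "default", Size: 10 << 30},
		{PoolName: "fast", Size: 5 << 30},
	}

	fmt.Println(diskTotal("limits.disk", volumes))           // all pools
	fmt.Println(diskTotal("limits.disk.pool.fast", volumes)) // only "fast"
}
```

Instance root disks follow the same pattern in `getInstanceLimits`, except the pool is taken from the root disk device's `pool` property.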
3 changes: 3 additions & 0 deletions internal/server/project/project.go
@@ -15,6 +15,9 @@ import (
// separator is used to delimit the project name from the suffix.
const separator = "_"

// projectLimitDiskPool is the prefix used for pool-specific disk limits.
var projectLimitDiskPool = "limits.disk.pool."

// Instance adds the "<project>_" prefix to instance name when the given project name is not "default".
func Instance(projectName string, instanceName string) string {
if projectName != api.ProjectDefaultName {