Merge pull request #988 from stgraber/import
Import LXD changes from stable-5.0
hallyn authored Jul 13, 2024
2 parents 630f2d0 + 71b0e89 commit 716ad48
Showing 49 changed files with 468 additions and 166 deletions.
27 changes: 17 additions & 10 deletions .github/workflows/tests.yml
@@ -164,16 +164,23 @@ jobs:
# optimize ext4 FSes for performance, not reliability
for fs in $(findmnt --noheading --type ext4 --list --uniq | awk '{print $1}'); do
# nombcache and data=writeback cannot be changed on remount
sudo mount -o remount,noatime,barrier=0,commit=6000 "${fs}"
sudo mount -o remount,noatime,barrier=0,commit=6000 "${fs}" || true
done
# disable dpkg from calling sync()
echo "force-unsafe-io" | sudo tee /etc/dpkg/dpkg.cfg.d/force-unsafe-io
- name: Reclaim some space
run: |
# This was inspired from https://github.com/easimon/maximize-build-space
set -eux
sudo snap remove lxd --purge
# Purge older snap revisions that are disabled/superseded by newer revisions of the same snap
snap list --all | while read -r name _ rev _ _ notes _; do
[ "${notes}" = "disabled" ] && snap remove "${name}" --revision "${rev}" --purge
done || true
# This was inspired from https://github.com/easimon/maximize-build-space
df -h /
# dotnet
sudo rm -rf /usr/share/dotnet
@@ -183,6 +190,13 @@ jobs:
sudo rm -rf /opt/ghc
df -h /
- name: Remove docker
run: |
set -eux
sudo apt-get autopurge -y moby-containerd docker uidmap
sudo ip link delete docker0
sudo nft flush ruleset
- name: Checkout
uses: actions/checkout@v4

@@ -213,14 +227,7 @@ jobs:
sudo add-apt-repository ppa:cowsql/stable -y --no-update
sudo apt-get update
sudo snap remove lxd --purge
sudo snap remove core20 --purge || true
sudo apt-get autopurge moby-containerd docker uidmap -y
sudo ip link delete docker0
sudo nft flush ruleset
sudo systemctl mask lxc.service
sudo systemctl mask lxc-net.service
sudo systemctl mask lxc.service lxc-net.service
sudo apt-get install --no-install-recommends -y \
curl \
2 changes: 1 addition & 1 deletion Makefile
@@ -312,7 +312,7 @@ endif
flake8 test/deps/import-busybox
shellcheck --shell sh test/*.sh test/includes/*.sh test/suites/*.sh test/backends/*.sh test/lint/*.sh
shellcheck test/extras/*.sh
run-parts --exit-on-error --regex '.sh' test/lint
run-parts --verbose --exit-on-error --regex '.sh' test/lint

.PHONY: staticcheck
staticcheck:
6 changes: 5 additions & 1 deletion cmd/incus-migrate/main_migrate.go
@@ -486,7 +486,11 @@ func (c *cmdMigrate) Run(cmd *cobra.Command, args []string) error {
}

cancel()
os.Exit(1)

// The following nolint directive ignores the "deep-exit" rule of the revive linter.
// We should be exiting cleanly by passing the above context into each invoked method and checking for
// cancellation. Unfortunately our client methods do not accept a context argument.
os.Exit(1) //nolint:revive
}()

if clientFingerprint != "" {
19 changes: 17 additions & 2 deletions cmd/incus/remote.go
@@ -31,6 +31,7 @@ type cmdRemote struct {
global *cmdGlobal
}

// Command returns a cobra.Command for use with (*cobra.Command).AddCommand.
func (c *cmdRemote) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("remote")
@@ -95,6 +96,7 @@ type cmdRemoteAdd struct {
flagProject string
}

// Command returns a cobra.Command for use with (*cobra.Command).AddCommand.
func (c *cmdRemoteAdd) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("add", i18n.G("[<remote>] <IP|FQDN|URL|token>"))
@@ -163,7 +165,7 @@ func (c *cmdRemoteAdd) findProject(d incus.InstanceServer, project string) (stri
return project, nil
}

func (c *cmdRemoteAdd) RunToken(server string, token string, rawToken *api.CertificateAddToken) error {
func (c *cmdRemoteAdd) runToken(server string, token string, rawToken *api.CertificateAddToken) error {
conf := c.global.conf

if !conf.HasClientCertificate() {
@@ -281,6 +283,7 @@ func (c *cmdRemoteAdd) addRemoteFromToken(addr string, server string, token stri
return conf.SaveConfig(c.global.confPath)
}

// Run is used in the RunE field of the cobra.Command returned by Command.
func (c *cmdRemoteAdd) Run(cmd *cobra.Command, args []string) error {
conf := c.global.conf

@@ -324,7 +327,7 @@ func (c *cmdRemoteAdd) Run(cmd *cobra.Command, args []string) error {

rawToken, err := localtls.CertificateTokenDecode(addr)
if err == nil {
return c.RunToken(server, addr, rawToken)
return c.runToken(server, addr, rawToken)
}

// Complex remote URL parsing
@@ -669,6 +672,7 @@ type cmdRemoteGetDefault struct {
remote *cmdRemote
}

// Command returns a cobra.Command for use with (*cobra.Command).AddCommand.
func (c *cmdRemoteGetDefault) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("get-default")
@@ -681,6 +685,7 @@ func (c *cmdRemoteGetDefault) Command() *cobra.Command {
return cmd
}

// Run is used in the RunE field of the cobra.Command returned by Command.
func (c *cmdRemoteGetDefault) Run(cmd *cobra.Command, args []string) error {
conf := c.global.conf

@@ -704,6 +709,7 @@ type cmdRemoteList struct {
flagFormat string
}

// Command returns a cobra.Command for use with (*cobra.Command).AddCommand.
func (c *cmdRemoteList) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("list")
@@ -718,6 +724,7 @@ func (c *cmdRemoteList) Command() *cobra.Command {
return cmd
}

// Run is used in the RunE field of the cobra.Command returned by Command.
func (c *cmdRemoteList) Run(cmd *cobra.Command, args []string) error {
conf := c.global.conf

@@ -788,6 +795,7 @@ type cmdRemoteRename struct {
remote *cmdRemote
}

// Command returns a cobra.Command for use with (*cobra.Command).AddCommand.
func (c *cmdRemoteRename) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("rename", i18n.G("<remote> <new-name>"))
@@ -809,6 +817,7 @@ func (c *cmdRemoteRename) Command() *cobra.Command {
return cmd
}

// Run is used in the RunE field of the cobra.Command returned by Command.
func (c *cmdRemoteRename) Run(cmd *cobra.Command, args []string) error {
conf := c.global.conf

@@ -867,6 +876,7 @@ type cmdRemoteRemove struct {
remote *cmdRemote
}

// Command returns a cobra.Command for use with (*cobra.Command).AddCommand.
func (c *cmdRemoteRemove) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("remove", i18n.G("<remote>"))
@@ -888,6 +898,7 @@ func (c *cmdRemoteRemove) Command() *cobra.Command {
return cmd
}

// Run is used in the RunE field of the cobra.Command returned by Command.
func (c *cmdRemoteRemove) Run(cmd *cobra.Command, args []string) error {
conf := c.global.conf

@@ -930,6 +941,7 @@ type cmdRemoteSwitch struct {
remote *cmdRemote
}

// Command returns a cobra.Command for use with (*cobra.Command).AddCommand.
func (c *cmdRemoteSwitch) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Aliases = []string{"set-default"}
@@ -951,6 +963,7 @@ func (c *cmdRemoteSwitch) Command() *cobra.Command {
return cmd
}

// Run is used in the RunE field of the cobra.Command returned by Command.
func (c *cmdRemoteSwitch) Run(cmd *cobra.Command, args []string) error {
conf := c.global.conf

@@ -977,6 +990,7 @@ type cmdRemoteSetURL struct {
remote *cmdRemote
}

// Command returns a cobra.Command for use with (*cobra.Command).AddCommand.
func (c *cmdRemoteSetURL) Command() *cobra.Command {
cmd := &cobra.Command{}
cmd.Use = usage("set-url", i18n.G("<remote> <URL>"))
@@ -997,6 +1011,7 @@ func (c *cmdRemoteSetURL) Command() *cobra.Command {
return cmd
}

// Run is used in the RunE field of the cobra.Command returned by Command.
func (c *cmdRemoteSetURL) Run(cmd *cobra.Command, args []string) error {
conf := c.global.conf

2 changes: 1 addition & 1 deletion cmd/incusd/api_cluster.go
@@ -743,7 +743,7 @@ func clusterPutJoin(d *Daemon, r *http.Request, req api.ClusterPut) response.Res

return nil
})
if err != nil && err.Error() != "This certificate already exists" {
if err != nil && !api.StatusErrorCheck(err, http.StatusConflict) {
return fmt.Errorf("Failed adding local trusted certificate %q (%s): %w", trustedCert.Name, trustedCert.Fingerprint, err)
}
}
2 changes: 1 addition & 1 deletion cmd/incusd/api_internal.go
@@ -654,7 +654,7 @@ func internalImportFromBackup(ctx context.Context, s *state.State, projectName s

if backupConf.Pool == nil {
// We don't know what kind of storage type the pool is.
return fmt.Errorf(`No storage pool struct in the backup file found. The storage pool needs to be recovered manually`)
return fmt.Errorf("No storage pool struct in the backup file found. The storage pool needs to be recovered manually")
}

// Try to retrieve the storage pool the instance supposedly lives on.
2 changes: 1 addition & 1 deletion cmd/incusd/images.go
@@ -4548,7 +4548,7 @@ func autoSyncImages(ctx context.Context, s *state.State) error {
go func(projectName string, fingerprint string) {
err := imageSyncBetweenNodes(ctx, s, nil, projectName, fingerprint)
if err != nil {
logger.Error("Failed to synchronize images", logger.Ctx{"err": err, "fingerprint": fingerprint})
logger.Error("Failed to synchronize images", logger.Ctx{"err": err, "project": projectName, "fingerprint": fingerprint})
}

ch <- nil
2 changes: 1 addition & 1 deletion cmd/incusd/instances_post.go
@@ -738,7 +738,7 @@ func createFromBackup(s *state.State, r *http.Request, projectName string, data

inst, err := instance.LoadByProjectAndName(s, bInfo.Project, bInfo.Name)
if err != nil {
return fmt.Errorf("Load instance: %w", err)
return fmt.Errorf("Failed loading instance: %w", err)
}

// Clean up created instance if the post hook fails below.
2 changes: 1 addition & 1 deletion cmd/incusd/migrate_storage_volumes.go
@@ -152,7 +152,7 @@ func (s *migrationSourceWs) DoStorage(state *state.State, projectName string, po

volSourceArgs := &localMigration.VolumeSourceArgs{
IndexHeaderVersion: respHeader.GetIndexHeaderVersion(), // Enable index header frame if supported.
Name: volName,
Name: srcConfig.Volume.Name,
MigrationType: migrationTypes[0],
Snapshots: offerHeader.SnapshotNames,
TrackProgress: true,
28 changes: 20 additions & 8 deletions cmd/incusd/storage_volumes.go
@@ -808,22 +808,28 @@ func clusterCopyCustomVolumeInternal(s *state.State, r *http.Request, sourceAddr
}

func doCustomVolumeRefresh(s *state.State, r *http.Request, requestProjectName string, projectName string, poolName string, req *api.StorageVolumesPost) response.Response {
var run func(op *operations.Operation) error

pool, err := storagePools.LoadByName(s, poolName)
if err != nil {
return response.SmartError(err)
}

run = func(op *operations.Operation) error {
var srcProjectName string
if req.Source.Project != "" {
srcProjectName, err = project.StorageVolumeProject(s.DB.Cluster, req.Source.Project, db.StoragePoolVolumeTypeCustom)
if err != nil {
return response.SmartError(err)
}
}

run := func(op *operations.Operation) error {
revert := revert.New()
defer revert.Fail()

if req.Source.Name == "" {
return fmt.Errorf("No source volume name supplied")
}

err = pool.RefreshCustomVolume(projectName, req.Source.Project, req.Name, req.Description, req.Config, req.Source.Pool, req.Source.Name, !req.Source.VolumeOnly, op)
err = pool.RefreshCustomVolume(projectName, srcProjectName, req.Name, req.Description, req.Config, req.Source.Pool, req.Source.Name, !req.Source.VolumeOnly, op)
if err != nil {
return err
}
@@ -841,13 +847,19 @@ func doCustomVolumeRefresh(s *state.State, r *http.Request, requestProjectName s
}

func doVolumeCreateOrCopy(s *state.State, r *http.Request, requestProjectName string, projectName string, poolName string, req *api.StorageVolumesPost) response.Response {
var run func(op *operations.Operation) error

pool, err := storagePools.LoadByName(s, poolName)
if err != nil {
return response.SmartError(err)
}

var srcProjectName string
if req.Source.Project != "" {
srcProjectName, err = project.StorageVolumeProject(s.DB.Cluster, req.Source.Project, db.StoragePoolVolumeTypeCustom)
if err != nil {
return response.SmartError(err)
}
}

volumeDBContentType, err := storagePools.VolumeContentTypeNameToContentType(req.ContentType)
if err != nil {
return response.SmartError(err)
@@ -858,15 +870,15 @@ func doVolumeCreateOrCopy(s *state.State, r *http.Request, requestProjectName st
return response.SmartError(err)
}

run = func(op *operations.Operation) error {
run := func(op *operations.Operation) error {
if req.Source.Name == "" {
// Use an empty operation for this sync response to pass the requestor
op := &operations.Operation{}
op.SetRequestor(r)
return pool.CreateCustomVolume(projectName, req.Name, req.Description, req.Config, contentType, op)
}

return pool.CreateCustomVolumeFromCopy(projectName, req.Source.Project, req.Name, req.Description, req.Config, req.Source.Pool, req.Source.Name, !req.Source.VolumeOnly, op)
return pool.CreateCustomVolumeFromCopy(projectName, srcProjectName, req.Name, req.Description, req.Config, req.Source.Pool, req.Source.Name, !req.Source.VolumeOnly, op)
}

// If no source name supplied then this a volume create operation.
1 change: 1 addition & 0 deletions doc/.sphinx/spellingcheck.yaml
@@ -1,3 +1,4 @@
jobs: 2
matrix:
- name: Markdown files
aspell:
11 changes: 11 additions & 0 deletions doc/howto/images_manage.md
@@ -67,6 +67,17 @@ To edit the full image properties, including the top-level properties, enter the

incus image edit <image_ID>

## Delete an image

To delete a local copy of an image, enter the following command:

incus image delete <image_ID>

Deleting an image won't affect running instances that are already using it, but it will remove the image locally.

After deletion, if the image was downloaded from a remote server, it is removed from the local cache and will be downloaded again on next use.
However, if the image was created manually (not cached), deleting it removes it permanently.
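
For illustration only, a hedged sketch of this workflow (the fingerprint `36911b4b4759` is a placeholder, not a real image):

    incus image list
    incus image delete 36911b4b4759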

## Configure image aliases

Configuring an alias for an image can be useful to make it easier to refer to an image, since remembering an alias is usually easier than remembering a fingerprint.
2 changes: 1 addition & 1 deletion doc/howto/storage_backup_volume.md
@@ -98,7 +98,7 @@ You can restore a custom storage volume to the state of any of its snapshots.
To do so, you must first stop all instances that use the storage volume.
Then use the following command:

incus storage volume restore <pool_name> <volume_name> <snapshot_name>
incus storage volume snapshot restore <pool_name> <volume_name> <snapshot_name>
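
As a hypothetical usage sketch (pool `default`, volume `myvolume`, and snapshot `snap0` are placeholder names), this might look like:

    incus storage volume snapshot restore default myvolume snap0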

You can also restore a snapshot into a new custom storage volume, either in the same storage pool or in a different one (even a remote storage pool).
To do so, use the following command:
4 changes: 2 additions & 2 deletions doc/reference/instance_options.md
@@ -173,7 +173,7 @@ Limiting huge pages is done through the `hugetlb` cgroup controller, which means
(instance-options-limits-kernel)=
### Kernel resource limits

Incus exposes a generic namespaced key `limits.kernel.*` that can be used to set resource limits for an instance.
For container instances, Incus exposes a generic namespaced key `limits.kernel.*` that can be used to set resource limits.

It is generic in the sense that Incus does not perform any validation on the resource that is specified following the `limits.kernel.*` prefix.
Incus cannot know about all the possible resources that a given kernel supports.
@@ -197,7 +197,7 @@ For example, `RLIMIT_NOFILE` should be specified as `nofile`.
A limit is specified as two colon-separated values that are either numeric or the word `unlimited` (for example, `limits.kernel.nofile=1000:2000`).
A single value can be used as a shortcut to set both soft and hard limit to the same value (for example, `limits.kernel.nofile=3000`).

A resource with no explicitly configured limit will inherit its limit from the process that starts up the instance.
A resource with no explicitly configured limit will inherit its limit from the process that starts up the container.
Note that this inheritance is not enforced by Incus but by the kernel.
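
As a minimal sketch, assuming the standard `incus config set` syntax and a container named `c1` (a placeholder), such a limit could be applied like this:

    incus config set c1 limits.kernel.nofile=1000:2000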

(instance-options-migration)=