From 1fe73e83fa348fb7755b1ddcee9b5b73c2c81d57 Mon Sep 17 00:00:00 2001 From: Aaron Prindle Date: Sat, 10 Feb 2024 01:44:47 +0000 Subject: [PATCH] chore(deps): bump github.com/moby/buildkit from 0.11.6 to 0.12.5, github.com/docker/docker from 24.0.7+incompatible to 25.0.3+incompatible, and other deps --- go.mod | 14 +- go.sum | 325 +----- pkg/commands/healthcheck.go | 13 +- .../github.com/containerd/typeurl/.gitignore | 2 - vendor/github.com/containerd/typeurl/LICENSE | 191 ---- .../github.com/containerd/typeurl/README.md | 20 - vendor/github.com/containerd/typeurl/doc.go | 83 -- vendor/github.com/containerd/typeurl/types.go | 214 ---- .../distribution/reference/.gitattributes | 1 + .../distribution/reference/.gitignore | 2 + .../distribution/reference/.golangci.yml | 18 + .../distribution/reference/CODE-OF-CONDUCT.md | 5 + .../distribution/reference/CONTRIBUTING.md | 114 +++ .../distribution/reference/GOVERNANCE.md | 144 +++ .../reference}/LICENSE | 8 +- .../distribution/reference/MAINTAINERS | 26 + .../distribution/reference/Makefile | 25 + .../distribution/reference/README.md | 30 + .../distribution/reference/SECURITY.md | 7 + .../reference/distribution-logo.svg | 1 + .../distribution/reference/helpers.go | 42 + .../distribution/reference/normalize.go | 224 +++++ .../distribution/reference/reference.go | 436 ++++++++ .../distribution/reference/regexp.go | 163 +++ .../github.com/distribution/reference/sort.go | 75 ++ vendor/github.com/docker/docker/AUTHORS | 49 +- vendor/github.com/docker/docker/api/README.md | 2 +- vendor/github.com/docker/docker/api/common.go | 2 +- .../docker/docker/api/common_unix.go | 7 - .../docker/docker/api/common_windows.go | 8 - .../github.com/docker/docker/api/swagger.yaml | 365 +++++-- .../docker/docker/api/types/auth.go | 7 - .../docker/api/types/backend/backend.go | 43 +- .../docker/api/types/checkpoint/list.go | 7 + .../docker/api/types/checkpoint/options.go | 19 + .../docker/docker/api/types/client.go | 91 -- .../docker/docker/api/types/configs.go | 49 - .../container/change_response_deprecated.go | 6 - .../docker/api/types/container/config.go | 36 +- .../docker/api/types/container/errors.go | 9 + .../docker/api/types/container/hostconfig.go | 52 +- .../api/types/container/hostconfig_unix.go | 15 +- .../api/types/container/hostconfig_windows.go | 10 +- .../docker/api/types/container/options.go | 67 ++ .../docker/docker/api/types/events/events.go | 84 +- .../delete_response.go} | 8 +- .../docker/docker/api/types/image/image.go | 9 + .../{image_summary.go => image/summary.go} | 15 +- .../docker/docker/api/types/mount/mount.go | 7 +- .../docker/api/types/network/endpoint.go | 147 +++ .../docker/docker/api/types/network/ipam.go | 134 +++ .../docker/api/types/network/network.go | 82 +- .../api/types/plugins/logdriver/entry.pb.go | 383 ++++---- .../docker/api/types/plugins/logdriver/gen.go | 2 +- .../docker/api/types/registry/registry.go | 4 +- .../docker/api/types/swarm/container.go | 43 +- .../docker/api/types/swarm/runtime/gen.go | 2 +- .../api/types/swarm/runtime/plugin.pb.go | 400 ++++---- .../api/types/swarm/runtime/plugin.proto | 2 - .../docker/docker/api/types/swarm/service.go | 6 +- .../types/swarm/service_create_response.go | 20 + .../{ => swarm}/service_update_response.go | 2 +- .../docker/docker/api/types/system/info.go | 116 +++ .../docker/docker/api/types/system/runtime.go | 20 + .../docker/api/types/system/security_opts.go | 48 + .../docker/docker/api/types/types.go | 238 +---- .../docker/api/types/types_deprecated.go | 138 +++ 
.../docker/docker/builder/builder.go | 11 +- .../docker/builder/dockerfile/buildargs.go | 2 + .../docker/builder/dockerfile/builder.go | 8 +- .../docker/builder/dockerfile/builder_unix.go | 1 - .../builder/dockerfile/containerbackend.go | 42 +- .../docker/docker/builder/dockerfile/copy.go | 27 +- .../docker/builder/dockerfile/copy_unix.go | 1 - .../docker/builder/dockerfile/dispatchers.go | 14 +- .../builder/dockerfile/dispatchers_unix.go | 1 - .../builder/dockerfile/dispatchers_windows.go | 1 - .../docker/builder/dockerfile/evaluator.go | 18 +- .../docker/builder/dockerfile/imagecontext.go | 4 +- .../docker/builder/dockerfile/imageprobe.go | 15 +- .../docker/builder/dockerfile/internals.go | 23 +- .../builder/dockerfile/internals_linux.go | 6 +- .../builder/dockerfile/internals_windows.go | 17 +- .../docker/builder/remotecontext/archive.go | 11 +- .../docker/builder/remotecontext/detect.go | 13 +- .../docker/builder/remotecontext/generate.go | 3 - .../docker/builder/remotecontext/git.go | 7 +- .../builder/remotecontext/git/gitutils.go | 1 - .../docker/builder/remotecontext/mimetype.go | 19 +- .../docker/builder/remotecontext/remote.go | 4 +- .../docker/builder/remotecontext/tarsum.go | 161 --- .../docker/builder/remotecontext/tarsum.pb.go | 525 ---------- .../docker/builder/remotecontext/tarsum.proto | 7 - .../builder/remotecontext/urlutil/urlutil.go | 2 +- .../github.com/docker/docker/client/README.md | 19 +- .../docker/docker/client/build_prune.go | 2 +- .../docker/docker/client/checkpoint_create.go | 4 +- .../docker/docker/client/checkpoint_delete.go | 4 +- .../docker/docker/client/checkpoint_list.go | 6 +- .../github.com/docker/docker/client/client.go | 168 +++- .../docker/docker/client/client_unix.go | 1 - .../docker/docker/client/config_create.go | 2 +- .../docker/docker/client/config_inspect.go | 2 +- .../docker/docker/client/config_list.go | 2 +- .../docker/docker/client/config_remove.go | 2 +- .../docker/docker/client/config_update.go | 2 +- .../docker/docker/client/container_attach.go | 9 +- .../docker/docker/client/container_commit.go | 5 +- .../docker/docker/client/container_create.go | 35 +- .../docker/docker/client/container_exec.go | 15 +- .../docker/docker/client/container_list.go | 4 +- .../docker/docker/client/container_logs.go | 4 +- .../docker/docker/client/container_prune.go | 2 +- .../docker/docker/client/container_remove.go | 4 +- .../docker/docker/client/container_resize.go | 6 +- .../docker/docker/client/container_restart.go | 12 +- .../docker/docker/client/container_start.go | 4 +- .../docker/docker/client/container_stats.go | 12 +- .../docker/docker/client/container_stop.go | 12 +- .../docker/docker/client/container_wait.go | 14 +- .../docker/client/distribution_inspect.go | 7 +- .../github.com/docker/docker/client/errors.go | 30 +- .../github.com/docker/docker/client/hijack.go | 73 +- .../docker/docker/client/image_build.go | 16 +- .../docker/docker/client/image_create.go | 8 +- .../docker/docker/client/image_import.go | 2 +- .../docker/docker/client/image_list.go | 12 +- .../docker/docker/client/image_load.go | 6 +- .../docker/docker/client/image_prune.go | 2 +- .../docker/docker/client/image_pull.go | 2 +- .../docker/docker/client/image_push.go | 8 +- .../docker/docker/client/image_remove.go | 5 +- .../docker/docker/client/image_search.go | 6 +- .../docker/docker/client/image_tag.go | 2 +- .../github.com/docker/docker/client/info.go | 6 +- .../docker/docker/client/interface.go | 31 +- .../docker/client/interface_experimental.go | 8 +- 
.../docker/docker/client/network_create.go | 12 + .../docker/docker/client/network_prune.go | 2 +- .../docker/docker/client/node_list.go | 1 - .../docker/docker/client/options.go | 105 +- .../github.com/docker/docker/client/ping.go | 12 +- .../docker/docker/client/plugin_install.go | 13 +- .../docker/docker/client/plugin_push.go | 6 +- .../docker/docker/client/plugin_upgrade.go | 10 +- .../docker/docker/client/request.go | 89 +- .../docker/docker/client/secret_create.go | 2 +- .../docker/docker/client/secret_inspect.go | 2 +- .../docker/docker/client/secret_list.go | 2 +- .../docker/docker/client/secret_remove.go | 2 +- .../docker/docker/client/secret_update.go | 2 +- .../docker/docker/client/service_create.go | 30 +- .../docker/docker/client/service_logs.go | 4 +- .../docker/docker/client/service_update.go | 31 +- .../docker/docker/client/task_logs.go | 4 +- .../docker/docker/client/transport.go | 17 - .../docker/docker/client/volume_prune.go | 2 +- .../docker/docker/client/volume_remove.go | 10 +- .../docker/docker/client/volume_update.go | 2 +- .../docker/docker/container/attach_context.go | 35 + .../docker/docker/container/container.go | 89 +- .../docker/docker/container/container_unix.go | 27 +- .../docker/container/container_windows.go | 3 +- .../docker/docker/container/exec.go | 5 +- .../docker/docker/container/health.go | 9 +- .../docker/docker/container/monitor.go | 7 +- .../docker/docker/container/mounts_unix.go | 15 +- .../docker/docker/container/stream/attach.go | 16 +- .../docker/docker/container/stream/streams.go | 8 +- .../docker/docker/container/view.go | 7 +- .../docker/daemon/graphdriver/driver.go | 54 +- .../daemon/graphdriver/driver_freebsd.go | 6 +- .../docker/daemon/graphdriver/driver_linux.go | 5 +- .../daemon/graphdriver/driver_unsupported.go | 7 +- .../daemon/graphdriver/driver_windows.go | 6 +- .../docker/daemon/graphdriver/fsdiff.go | 32 +- .../docker/docker/daemon/logger/adapter.go | 7 +- .../docker/docker/daemon/logger/copier.go | 5 +- .../daemon/logger/jsonfilelog/jsonfilelog.go | 4 +- .../jsonfilelog/jsonlog/jsonlogbytes.go | 2 +- .../docker/daemon/logger/local/local.go | 4 +- .../docker/daemon/logger/logger_error.go | 6 +- .../logger/loggerutils/cache/local_cache.go | 5 +- .../daemon/logger/loggerutils/file_unix.go | 1 - .../daemon/logger/loggerutils/follow.go | 7 +- .../daemon/logger/loggerutils/logfile.go | 20 +- .../docker/docker/daemon/logger/plugin.go | 2 +- .../docker/daemon/logger/plugin_unix.go | 3 +- .../daemon/logger/plugin_unsupported.go | 1 - .../docker/docker/daemon/logger/proxy.go | 8 +- .../docker/docker/daemon/network/settings.go | 6 +- .../github.com/docker/docker/errdefs/defs.go | 2 +- .../docker/docker/errdefs/helpers.go | 2 +- .../docker/docker/errdefs/http_helpers.go | 2 +- vendor/github.com/docker/docker/errdefs/is.go | 18 +- vendor/github.com/docker/docker/image/fs.go | 15 +- .../github.com/docker/docker/image/image.go | 46 +- .../docker/docker/image/image_os.go | 18 + .../github.com/docker/docker/image/rootfs.go | 5 +- .../docker/image/spec/specs-go/v1/image.go | 54 + .../github.com/docker/docker/image/store.go | 44 +- .../docker/internal/multierror/multierror.go | 46 + .../docker/internal/unshare/unshare_linux.go | 1 - .../docker/docker/layer/filestore.go | 17 +- .../github.com/docker/docker/layer/layer.go | 9 +- .../docker/docker/layer/layer_store.go | 37 +- .../docker/docker/layer/layer_unix.go | 1 - .../docker/docker/layer/migration.go | 47 +- .../docker/libcontainerd/types/types.go | 4 +- .../docker/docker/oci/caps/utils_linux.go 
| 5 +- .../docker/docker/oci/caps/utils_other.go | 1 - .../github.com/docker/docker/oci/defaults.go | 10 +- .../docker/docker/pkg/archive/archive.go | 91 +- .../docker/pkg/archive/archive_linux.go | 9 +- .../docker/pkg/archive/archive_other.go | 1 - .../docker/docker/pkg/archive/archive_unix.go | 18 +- .../docker/docker/pkg/archive/changes.go | 23 +- .../docker/pkg/archive/changes_linux.go | 2 +- .../docker/pkg/archive/changes_other.go | 1 - .../docker/docker/pkg/archive/changes_unix.go | 1 - .../docker/docker/pkg/archive/copy.go | 5 +- .../docker/docker/pkg/archive/copy_unix.go | 1 - .../docker/docker/pkg/archive/diff.go | 9 +- .../docker/docker/pkg/archive/diff_unix.go | 1 - .../docker/docker/pkg/archive/path_unix.go | 1 - .../docker/pkg/archive/time_unsupported.go | 1 - .../docker/pkg/chrootarchive/archive.go | 2 +- .../docker/pkg/chrootarchive/archive_linux.go | 53 + .../docker/pkg/chrootarchive/archive_unix.go | 18 +- .../pkg/chrootarchive/archive_unix_nolinux.go | 226 +++++ .../pkg/chrootarchive/archive_windows.go | 4 +- .../docker/pkg/chrootarchive/diff_unix.go | 22 +- .../docker/pkg/containerfs/containerfs.go | 14 + .../pkg/containerfs/containerfs_unix.go | 11 - .../pkg/containerfs/containerfs_windows.go | 15 - .../docker/docker/pkg/containerfs/rm.go | 1 - .../docker/docker/pkg/homedir/homedir.go | 44 + .../docker/pkg/homedir/homedir_others.go | 1 - .../docker/docker/pkg/homedir/homedir_unix.go | 37 +- .../docker/pkg/homedir/homedir_windows.go | 24 +- .../docker/docker/pkg/idtools/idtools_unix.go | 17 +- .../pkg/idtools/usergroupadd_unsupported.go | 1 - .../docker/docker/pkg/idtools/utils_unix.go | 1 - .../docker/docker/pkg/ioutils/readers.go | 21 + .../docker/pkg/ioutils/tempdir_deprecated.go | 10 - .../docker/docker/pkg/ioutils/writers.go | 10 +- .../docker/docker/pkg/meminfo/meminfo.go | 26 - .../docker/pkg/meminfo/meminfo_linux.go | 69 -- .../docker/pkg/meminfo/meminfo_unsupported.go | 11 - .../docker/pkg/meminfo/meminfo_windows.go | 45 - .../docker/pkg/parsers/kernel/kernel.go | 1 - .../pkg/parsers/kernel/kernel_darwin.go | 1 - .../docker/pkg/parsers/kernel/kernel_unix.go | 7 +- .../pkg/parsers/kernel/kernel_windows.go | 1 - .../pkg/parsers/kernel/uname_unsupported.go | 1 - .../docker/docker/pkg/plugins/client.go | 39 +- .../docker/docker/pkg/plugins/discovery.go | 72 +- .../docker/pkg/plugins/discovery_unix.go | 18 +- .../docker/pkg/plugins/discovery_windows.go | 8 +- .../docker/docker/pkg/plugins/plugins.go | 35 +- .../docker/docker/pkg/plugins/plugins_unix.go | 10 - .../docker/pkg/plugins/plugins_windows.go | 7 - .../docker/pkg/plugins/transport/http.go | 19 +- .../docker/pkg/plugins/transport/mimetype.go | 6 + .../docker/pkg/plugins/transport/transport.go | 36 - .../docker/docker/pkg/process/doc.go | 3 - .../docker/docker/pkg/process/process_unix.go | 82 -- .../docker/pkg/process/process_windows.go | 52 - .../docker/docker/pkg/reexec/command_unix.go | 1 - .../docker/pkg/reexec/command_unsupported.go | 1 - .../docker/docker/pkg/rootless/rootless.go | 18 +- .../docker/pkg/sysinfo/cgroup2_linux.go | 7 +- .../docker/docker/pkg/sysinfo/numcpu_other.go | 1 - .../docker/pkg/sysinfo/sysinfo_linux.go | 5 +- .../docker/pkg/sysinfo/sysinfo_other.go | 1 - .../docker/pkg/system/chtimes_nowindows.go | 1 - .../docker/docker/pkg/system/errors.go | 13 +- .../docker/docker/pkg/system/filesys_unix.go | 1 - .../docker/docker/pkg/system/image_os.go | 10 - .../docker/pkg/system/image_os_deprecated.go | 19 + .../docker/docker/pkg/system/init_windows.go | 6 +- 
.../docker/docker/pkg/system/lstat_unix.go | 1 - .../docker/pkg/system/meminfo_deprecated.go | 16 - .../docker/docker/pkg/system/mknod.go | 1 - .../docker/docker/pkg/system/mknod_freebsd.go | 1 - .../docker/docker/pkg/system/mknod_unix.go | 1 - .../docker/docker/pkg/system/mknod_windows.go | 11 - .../docker/pkg/system/path_deprecated.go | 18 - .../docker/pkg/system/process_deprecated.go | 27 - .../docker/docker/pkg/system/stat_bsd.go | 7 +- .../docker/docker/pkg/system/stat_darwin.go | 6 +- .../docker/docker/pkg/system/stat_linux.go | 6 +- .../docker/docker/pkg/system/stat_openbsd.go | 6 +- .../docker/docker/pkg/system/stat_unix.go | 1 - .../docker/docker/pkg/system/stat_windows.go | 3 +- .../docker/docker/pkg/system/utimes_unix.go | 1 - .../docker/pkg/system/utimes_unsupported.go | 1 - .../docker/docker/pkg/system/xattrs.go | 18 + .../docker/docker/pkg/system/xattrs_linux.go | 18 +- .../docker/pkg/system/xattrs_unsupported.go | 1 - .../docker/docker/pkg/tarsum/fileinfosums.go | 2 + .../docker/docker/pkg/tarsum/tarsum.go | 12 +- .../docker/docker/pkg/tarsum/versioning.go | 28 +- .../docker/docker/plugin/v2/plugin_linux.go | 2 +- .../docker/plugin/v2/plugin_unsupported.go | 1 - .../docker/docker/runconfig/config_unix.go | 1 - .../docker/docker/runconfig/errors.go | 2 + .../docker/docker/runconfig/hostconfig.go | 7 +- .../docker/runconfig/hostconfig_unix.go | 4 +- .../docker/runconfig/hostconfig_windows.go | 3 +- .../docker/volume/mounts/linux_parser.go | 7 + .../docker/docker/volume/mounts/mounts.go | 9 +- .../docker/docker/volume/mounts/parser.go | 2 +- .../docker/docker/volume/mounts/validate.go | 1 + .../docker/volume/mounts/volume_unix.go | 1 - .../docker/volume/mounts/windows_parser.go | 4 +- .../github.com/klauspost/compress/README.md | 9 + .../klauspost/compress/zstd/enc_best.go | 11 +- .../frontend/dockerfile/instructions/bflag.go | 25 +- .../instructions/commands_runmount.go | 129 ++- .../instructions/commands_runnetwork.go | 12 +- .../instructions/commands_secrets.go | 5 - .../dockerfile/instructions/commands_ssh.go | 5 - .../frontend/dockerfile/instructions/parse.go | 14 +- .../frontend/dockerfile/parser/parser.go | 3 +- .../dockerfile/shell/equal_env_unix.go | 4 +- .../dockerfile/shell/equal_env_windows.go | 6 +- .../buildkit/frontend/dockerfile/shell/lex.go | 81 +- .../moby/buildkit/util/stack/stack.go | 2 +- .../moby/buildkit/util/stack/stack.pb.go | 2 +- vendor/github.com/opencontainers/runc/LICENSE | 191 ---- vendor/github.com/opencontainers/runc/NOTICE | 17 - .../runc/libcontainer/user/lookup_unix.go | 157 --- .../runc/libcontainer/user/user.go | 605 ------------ .../runc/libcontainer/user/user_fuzzer.go | 43 - .../prometheus/common/expfmt/decode.go | 36 +- .../prometheus/common/expfmt/fuzz.go | 4 +- .../common/expfmt/openmetrics_create.go | 22 +- .../prometheus/common/expfmt/text_create.go | 3 +- .../prometheus/common/expfmt/text_parse.go | 10 +- .../bitbucket.org/ww/goautoneg/autoneg.go | 22 +- .../prometheus/common/model/time.go | 89 +- .../prometheus/common/model/value.go | 246 ++--- .../prometheus/common/model/value_float.go | 100 ++ .../common/model/value_histogram.go | 178 ++++ .../prometheus/common/model/value_type.go | 83 ++ .../prometheus/procfs/Makefile.common | 9 +- .../github.com/prometheus/procfs/cpuinfo.go | 36 + .../prometheus/procfs/cpuinfo_loong64.go | 19 + .../prometheus/procfs/cpuinfo_others.go | 4 +- vendor/github.com/prometheus/procfs/doc.go | 51 +- .../prometheus/procfs/mountstats.go | 3 +- .../prometheus/procfs/net_softnet.go | 70 +- 
.../github.com/prometheus/procfs/netstat.go | 53 +- .../prometheus/procfs/proc_cgroup.go | 2 +- .../prometheus/procfs/proc_interrupts.go | 98 ++ .../prometheus/procfs/proc_netstat.go | 491 ++++----- .../github.com/prometheus/procfs/proc_snmp.go | 318 +++--- .../prometheus/procfs/proc_snmp6.go | 364 +++---- .../github.com/prometheus/procfs/proc_stat.go | 4 +- .../prometheus/procfs/proc_status.go | 6 +- vendor/github.com/prometheus/procfs/stat.go | 22 +- vendor/github.com/prometheus/procfs/thread.go | 79 ++ vendor/github.com/prometheus/procfs/vm.go | 4 +- .../rootlesskit/pkg/api/api.go | 39 - .../rootlesskit/pkg/api/client/client.go | 212 ---- .../rootlesskit/pkg/api/openapi.yaml | 171 ---- .../rootlesskit/pkg/port/port.go | 61 -- .../github.com/tonistiigi/fsutil/.gitignore | 9 - .../github.com/tonistiigi/fsutil/Dockerfile | 30 - vendor/github.com/tonistiigi/fsutil/LICENSE | 22 - .../tonistiigi/fsutil/chtimes_linux.go | 21 - .../tonistiigi/fsutil/chtimes_nolinux.go | 22 - vendor/github.com/tonistiigi/fsutil/diff.go | 51 - .../tonistiigi/fsutil/diff_containerd.go | 253 ----- .../tonistiigi/fsutil/diskwriter.go | 365 ------- .../tonistiigi/fsutil/diskwriter_freebsd.go | 17 - .../tonistiigi/fsutil/diskwriter_unix.go | 53 - .../tonistiigi/fsutil/diskwriter_unixnobsd.go | 17 - .../tonistiigi/fsutil/diskwriter_windows.go | 18 - .../tonistiigi/fsutil/docker-bake.hcl | 67 -- .../tonistiigi/fsutil/followlinks.go | 149 --- vendor/github.com/tonistiigi/fsutil/fs.go | 119 --- .../github.com/tonistiigi/fsutil/hardlinks.go | 48 - vendor/github.com/tonistiigi/fsutil/readme.md | 45 - .../github.com/tonistiigi/fsutil/receive.go | 286 ------ vendor/github.com/tonistiigi/fsutil/send.go | 208 ---- vendor/github.com/tonistiigi/fsutil/stat.go | 64 -- .../github.com/tonistiigi/fsutil/stat_unix.go | 72 -- .../tonistiigi/fsutil/stat_windows.go | 16 - .../github.com/tonistiigi/fsutil/tarwriter.go | 80 -- .../tonistiigi/fsutil/types/generate.go | 3 - .../tonistiigi/fsutil/types/stat.go | 7 - .../tonistiigi/fsutil/types/stat.pb.go | 929 ------------------ .../tonistiigi/fsutil/types/stat.proto | 19 - .../tonistiigi/fsutil/types/wire.pb.go | 575 ----------- .../tonistiigi/fsutil/types/wire.proto | 21 - .../github.com/tonistiigi/fsutil/validator.go | 93 -- vendor/github.com/tonistiigi/fsutil/walker.go | 357 ------- vendor/modules.txt | 38 +- 400 files changed, 6730 insertions(+), 10746 deletions(-) delete mode 100644 vendor/github.com/containerd/typeurl/.gitignore delete mode 100644 vendor/github.com/containerd/typeurl/LICENSE delete mode 100644 vendor/github.com/containerd/typeurl/README.md delete mode 100644 vendor/github.com/containerd/typeurl/doc.go delete mode 100644 vendor/github.com/containerd/typeurl/types.go create mode 100644 vendor/github.com/distribution/reference/.gitattributes create mode 100644 vendor/github.com/distribution/reference/.gitignore create mode 100644 vendor/github.com/distribution/reference/.golangci.yml create mode 100644 vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md create mode 100644 vendor/github.com/distribution/reference/CONTRIBUTING.md create mode 100644 vendor/github.com/distribution/reference/GOVERNANCE.md rename vendor/github.com/{rootless-containers/rootlesskit => distribution/reference}/LICENSE (99%) create mode 100644 vendor/github.com/distribution/reference/MAINTAINERS create mode 100644 vendor/github.com/distribution/reference/Makefile create mode 100644 vendor/github.com/distribution/reference/README.md create mode 100644 
vendor/github.com/distribution/reference/SECURITY.md create mode 100644 vendor/github.com/distribution/reference/distribution-logo.svg create mode 100644 vendor/github.com/distribution/reference/helpers.go create mode 100644 vendor/github.com/distribution/reference/normalize.go create mode 100644 vendor/github.com/distribution/reference/reference.go create mode 100644 vendor/github.com/distribution/reference/regexp.go create mode 100644 vendor/github.com/distribution/reference/sort.go delete mode 100644 vendor/github.com/docker/docker/api/common_unix.go delete mode 100644 vendor/github.com/docker/docker/api/common_windows.go delete mode 100644 vendor/github.com/docker/docker/api/types/auth.go create mode 100644 vendor/github.com/docker/docker/api/types/checkpoint/list.go create mode 100644 vendor/github.com/docker/docker/api/types/checkpoint/options.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go create mode 100644 vendor/github.com/docker/docker/api/types/container/errors.go create mode 100644 vendor/github.com/docker/docker/api/types/container/options.go rename vendor/github.com/docker/docker/api/types/{image_delete_response_item.go => image/delete_response.go} (68%) create mode 100644 vendor/github.com/docker/docker/api/types/image/image.go rename vendor/github.com/docker/docker/api/types/{image_summary.go => image/summary.go} (85%) create mode 100644 vendor/github.com/docker/docker/api/types/network/endpoint.go create mode 100644 vendor/github.com/docker/docker/api/types/network/ipam.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/service_create_response.go rename vendor/github.com/docker/docker/api/types/{ => swarm}/service_update_response.go (95%) create mode 100644 vendor/github.com/docker/docker/api/types/system/info.go create mode 100644 vendor/github.com/docker/docker/api/types/system/runtime.go create mode 100644 vendor/github.com/docker/docker/api/types/system/security_opts.go create mode 100644 vendor/github.com/docker/docker/api/types/types_deprecated.go delete mode 100644 vendor/github.com/docker/docker/builder/remotecontext/generate.go delete mode 100644 vendor/github.com/docker/docker/builder/remotecontext/tarsum.go delete mode 100644 vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go delete mode 100644 vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto delete mode 100644 vendor/github.com/docker/docker/client/transport.go create mode 100644 vendor/github.com/docker/docker/container/attach_context.go create mode 100644 vendor/github.com/docker/docker/image/image_os.go create mode 100644 vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go create mode 100644 vendor/github.com/docker/docker/internal/multierror/multierror.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix_nolinux.go delete mode 100644 vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir.go delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/meminfo/meminfo.go delete mode 100644 vendor/github.com/docker/docker/pkg/meminfo/meminfo_linux.go delete mode 100644 
vendor/github.com/docker/docker/pkg/meminfo/meminfo_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/meminfo/meminfo_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/transport/mimetype.go delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/transport/transport.go delete mode 100644 vendor/github.com/docker/docker/pkg/process/doc.go delete mode 100644 vendor/github.com/docker/docker/pkg/process/process_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/process/process_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/image_os.go create mode 100644 vendor/github.com/docker/docker/pkg/system/image_os_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/path_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/process_deprecated.go create mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go delete mode 100644 vendor/github.com/opencontainers/runc/LICENSE delete mode 100644 vendor/github.com/opencontainers/runc/NOTICE delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go create mode 100644 vendor/github.com/prometheus/common/model/value_float.go create mode 100644 vendor/github.com/prometheus/common/model/value_histogram.go create mode 100644 vendor/github.com/prometheus/common/model/value_type.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_loong64.go create mode 100644 vendor/github.com/prometheus/procfs/proc_interrupts.go create mode 100644 vendor/github.com/prometheus/procfs/thread.go delete mode 100644 vendor/github.com/rootless-containers/rootlesskit/pkg/api/api.go delete mode 100644 vendor/github.com/rootless-containers/rootlesskit/pkg/api/client/client.go delete mode 100644 vendor/github.com/rootless-containers/rootlesskit/pkg/api/openapi.yaml delete mode 100644 vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/.gitignore delete mode 100644 vendor/github.com/tonistiigi/fsutil/Dockerfile delete mode 100644 vendor/github.com/tonistiigi/fsutil/LICENSE delete mode 100644 vendor/github.com/tonistiigi/fsutil/chtimes_linux.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diff.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diff_containerd.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter_freebsd.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter_unixnobsd.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go delete mode 100644 
vendor/github.com/tonistiigi/fsutil/docker-bake.hcl delete mode 100644 vendor/github.com/tonistiigi/fsutil/followlinks.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/fs.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/hardlinks.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/readme.md delete mode 100644 vendor/github.com/tonistiigi/fsutil/receive.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/send.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/stat.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/stat_unix.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/stat_windows.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/tarwriter.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/generate.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/stat.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/stat.pb.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/stat.proto delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/wire.pb.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/wire.proto delete mode 100644 vendor/github.com/tonistiigi/fsutil/validator.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/walker.go diff --git a/go.mod b/go.mod index fba5669630..99b6dcdedd 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231213181459-b0fcec718dc6 github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 github.com/containerd/cgroups v1.1.0 // indirect - github.com/docker/docker v24.0.7+incompatible + github.com/docker/docker v25.0.3+incompatible github.com/go-git/go-billy/v5 v5.5.0 github.com/go-git/go-git/v5 v5.11.0 github.com/golang/mock v1.6.0 @@ -27,7 +27,7 @@ require ( github.com/google/slowjam v1.1.0 github.com/karrick/godirwalk v1.16.1 github.com/minio/highwayhash v1.0.2 - github.com/moby/buildkit v0.11.6 + github.com/moby/buildkit v0.12.5 github.com/otiai10/copy v1.14.0 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 @@ -85,7 +85,6 @@ require ( github.com/containerd/continuity v0.4.2 // indirect github.com/containerd/fifo v1.1.0 // indirect github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect - github.com/containerd/typeurl v1.0.2 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/docker/cli v24.0.7+incompatible // indirect @@ -112,7 +111,7 @@ require ( github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/compress v1.17.0 // indirect + github.com/klauspost/compress v1.17.2 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/moby/locker v1.0.1 // indirect @@ -131,11 +130,11 @@ require ( github.com/opencontainers/selinux v1.11.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect github.com/rootless-containers/rootlesskit v1.1.0 // indirect github.com/sergi/go-diff v1.3.1 // indirect - github.com/tonistiigi/fsutil 
v0.0.0-20230105215944-fb433841cbfa // indirect + github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb // indirect github.com/vbatts/tar-split v0.11.3 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect go.etcd.io/etcd/raft/v3 v3.5.6 // indirect @@ -169,6 +168,7 @@ require ( github.com/containerd/ttrpc v1.2.2 // indirect github.com/containerd/typeurl/v2 v2.1.1 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/distribution/reference v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.4.1 // indirect diff --git a/go.sum b/go.sum index 5bb1da8c1b..0640d339e8 100644 --- a/go.sum +++ b/go.sum @@ -1,48 +1,16 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM= cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= 
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8= cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA= @@ -85,7 +53,6 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/GoogleCloudPlatform/docker-credential-gcr v1.5.1-0.20230328182921-62afb2723512 h1:cauzkEqKnOXwl0L5futNq6BbWWtIRPfJtwDDulac6A8= github.com/GoogleCloudPlatform/docker-credential-gcr v1.5.1-0.20230328182921-62afb2723512/go.mod h1:6u40niDQl1ufFtykax72lx+pcIO67AoUSu9ebOzdA8Q= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= @@ -98,10 +65,7 @@ github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azu github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/aws/aws-sdk-go-v2 v1.24.1 
h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= @@ -156,15 +120,10 @@ github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7N github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4= github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4= github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -192,8 +151,6 @@ github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSk github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs= github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= -github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= -github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -209,6 +166,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg= github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= @@ -216,6 +175,8 @@ github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m3 github.com/docker/distribution v2.8.2+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ= +github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -250,17 +211,9 @@ github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgF github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -280,25 +233,16 @@ github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -306,21 +250,16 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -334,18 +273,7 @@ github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+u github.com/google/go-querystring v1.1.0 
h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/slowjam v1.1.0 h1:mENjYWhA6/p7P4vRwfMA5mfLEjOk48fKScuzryYnFA0= @@ -357,8 +285,6 @@ github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -369,12 +295,10 @@ github.com/hashicorp/go-memdb v1.3.2/go.mod h1:Mluclgwib3R93Hk5fxEfiRhB+6Dar64wW github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -383,16 +307,9 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= @@ -400,10 +317,9 @@ github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= -github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -422,8 +338,8 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure 
v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/buildkit v0.11.6 h1:VYNdoKk5TVxN7k4RvZgdeM4GOyRvIi4Z8MXOY7xvyUs= -github.com/moby/buildkit v0.11.6/go.mod h1:GCqKfHhz+pddzfgaR7WmHVEE3nKKZMMDPpK8mh3ZLv4= +github.com/moby/buildkit v0.12.5 h1:RNHH1l3HDhYyZafr5EgstEu8aGNCwyfvMtrQDtjH9T0= +github.com/moby/buildkit v0.12.5/go.mod h1:YGwjA2loqyiYfZeEo8FtI7z4x5XponAaIWsWcSjWwso= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= @@ -448,11 +364,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= @@ -481,33 +395,23 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod 
h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rootless-containers/rootlesskit v1.1.0 h1:cRaRIYxY8oce4eE/zeAUZhgKu/4tU1p9YHN4+suwV7M= github.com/rootless-containers/rootlesskit v1.1.0/go.mod h1:H+o9ndNe7tS91WqU0/+vpvc+VaCd7TCIWaJjnV0ujUo= @@ -519,8 +423,6 @@ github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWR github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -556,8 +458,8 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa h1:XOFp/3aBXlqmOFAg3r6e0qQjPnK5I970LilqX+Is1W8= -github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa/go.mod h1:AvLEd1LEIl64G2Jpgwo7aVV5lGH0ePcKl0ygGIHNYl8= +github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb h1:uUe8rNyVXM8moActoBol6Xf6xX2GMr7SosR2EywMvGg= +github.com/tonistiigi/fsutil 
v0.0.0-20230629203738-36ef4d8c0dbb/go.mod h1:SxX/oNQ/ag6Vaoli547ipFK9J7BZn5JqJG0JE8lf8bA= github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= @@ -565,20 +467,13 @@ github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RV github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= go.etcd.io/etcd/raft/v3 v3.5.6 h1:tOmx6Ym6rn2GpZOrvTGJZciJHek6RnC3U/zNInzIN50= go.etcd.io/etcd/raft/v3 v3.5.6/go.mod h1:wL8kkRGx1Hp8FmZUuHfL3K2/OaGIDaXGr1N7i2G07J0= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= @@ -601,8 +496,6 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -615,35 +508,11 @@ golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp 
v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20231219160207-73b9e39aefca h1:+xQfFu/HO/82Wwg4zuJ5xiLp0yaOLJjBGnuafXp85YQ= golang.org/x/exp v0.0.0-20231219160207-73b9e39aefca/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -654,39 +523,17 @@ golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -694,22 +541,13 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -722,35 +560,9 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -758,12 +570,9 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -781,10 +590,7 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -794,9 +600,6 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -804,43 +607,9 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= @@ -852,61 +621,16 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.161.0 h1:oYzk/bs26WN10AV7iU7MVJVXBH8oCPS2hHyBiEeFoSU= google.golang.org/api v0.161.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0= google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 
h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM= @@ -914,17 +638,10 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= @@ -936,7 +653,6 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= @@ -945,20 +661,15 @@ google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7 google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod 
h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -967,12 +678,4 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/commands/healthcheck.go b/pkg/commands/healthcheck.go index af40ba6526..55474bb781 100644 --- a/pkg/commands/healthcheck.go +++ b/pkg/commands/healthcheck.go @@ -18,10 +18,21 @@ package commands import ( "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/docker/docker/api/types/container" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/moby/buildkit/frontend/dockerfile/instructions" ) +func convertDockerHealthConfigToContainerRegistryFormat(dockerHealthcheck container.HealthConfig) v1.HealthConfig { + return v1.HealthConfig{ + Test: dockerHealthcheck.Test, + Interval: dockerHealthcheck.Interval, + Timeout: dockerHealthcheck.Timeout, + StartPeriod: dockerHealthcheck.StartPeriod, + Retries: dockerHealthcheck.Retries, + } +} + type HealthCheckCommand struct { BaseCommand cmd *instructions.HealthCheckCommand @@ -29,7 +40,7 @@ type HealthCheckCommand struct { // ExecuteCommand handles command processing similar to CMD and RUN, func (h *HealthCheckCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { - check := v1.HealthConfig(*h.cmd.Health) + check := convertDockerHealthConfigToContainerRegistryFormat(*h.cmd.Health) config.Healthcheck = &check return nil diff --git a/vendor/github.com/containerd/typeurl/.gitignore b/vendor/github.com/containerd/typeurl/.gitignore deleted file mode 100644 index d53846778b..0000000000 --- a/vendor/github.com/containerd/typeurl/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.test -coverage.txt diff --git a/vendor/github.com/containerd/typeurl/LICENSE 
b/vendor/github.com/containerd/typeurl/LICENSE deleted file mode 100644 index 584149b6ee..0000000000 --- a/vendor/github.com/containerd/typeurl/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/containerd/typeurl/README.md b/vendor/github.com/containerd/typeurl/README.md deleted file mode 100644 index d021e96724..0000000000 --- a/vendor/github.com/containerd/typeurl/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# typeurl - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/typeurl)](https://pkg.go.dev/github.com/containerd/typeurl) -[![Build Status](https://github.com/containerd/typeurl/workflows/CI/badge.svg)](https://github.com/containerd/typeurl/actions?query=workflow%3ACI) -[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl) -[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/typeurl)](https://goreportcard.com/report/github.com/containerd/typeurl) - -A Go package for managing the registration, marshaling, and unmarshaling of encoded types. - -This package helps when types are sent over a GRPC API and marshaled as a [protobuf.Any](https://github.com/gogo/protobuf/blob/master/protobuf/google/protobuf/any.proto). - -## Project details - -**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/typeurl/doc.go b/vendor/github.com/containerd/typeurl/doc.go deleted file mode 100644 index c0d0fd2053..0000000000 --- a/vendor/github.com/containerd/typeurl/doc.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package typeurl - -// Package typeurl assists with managing the registration, marshaling, and -// unmarshaling of types encoded as protobuf.Any. -// -// A protobuf.Any is a proto message that can contain any arbitrary data. It -// consists of two components, a TypeUrl and a Value, and its proto definition -// looks like this: -// -// message Any { -// string type_url = 1; -// bytes value = 2; -// } -// -// The TypeUrl is used to distinguish the contents from other proto.Any -// messages. This typeurl library manages these URLs to enable automagic -// marshaling and unmarshaling of the contents. -// -// For example, consider this go struct: -// -// type Foo struct { -// Field1 string -// Field2 string -// } -// -// To use typeurl, types must first be registered. This is typically done in -// the init function -// -// func init() { -// typeurl.Register(&Foo{}, "Foo") -// } -// -// This will register the type Foo with the url path "Foo". The arguments to -// Register are variadic, and are used to construct a url path. 
Consider this -// example, from the github.com/containerd/containerd/client package: -// -// func init() { -// const prefix = "types.containerd.io" -// // register TypeUrls for commonly marshaled external types -// major := strconv.Itoa(specs.VersionMajor) -// typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec") -// // this function has more Register calls, which are elided. -// } -// -// This registers several types under a more complex url, which ends up mapping -// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other -// value for major). -// -// Once a type is registered, it can be marshaled to a proto.Any message simply -// by calling `MarshalAny`, like this: -// -// foo := &Foo{Field1: "value1", Field2: "value2"} -// anyFoo, err := typeurl.MarshalAny(foo) -// -// MarshalAny will resolve the correct URL for the type. If the type in -// question implements the proto.Message interface, then it will be marshaled -// as a proto message. Otherwise, it will be marshaled as json. This means that -// typeurl will work on any arbitrary data, whether or not it has a proto -// definition, as long as it can be serialized to json. -// -// To unmarshal, the process is simply inverse: -// -// iface, err := typeurl.UnmarshalAny(anyFoo) -// foo := iface.(*Foo) -// -// The correct type is automatically chosen from the type registry, and the -// returned interface can be cast straight to that type. diff --git a/vendor/github.com/containerd/typeurl/types.go b/vendor/github.com/containerd/typeurl/types.go deleted file mode 100644 index 647d419a29..0000000000 --- a/vendor/github.com/containerd/typeurl/types.go +++ /dev/null @@ -1,214 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package typeurl - -import ( - "encoding/json" - "path" - "reflect" - "sync" - - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" - "github.com/pkg/errors" -) - -var ( - mu sync.RWMutex - registry = make(map[reflect.Type]string) -) - -// Definitions of common error types used throughout typeurl. -// -// These error types are used with errors.Wrap and errors.Wrapf to add context -// to an error. -// -// To detect an error class, use errors.Is() functions to tell whether an -// error is of this type. -var ( - ErrNotFound = errors.New("not found") -) - -// Register a type with a base URL for JSON marshaling. When the MarshalAny and -// UnmarshalAny functions are called they will treat the Any type value as JSON. -// To use protocol buffers for handling the Any value the proto.Register -// function should be used instead of this function. -func Register(v interface{}, args ...string) { - var ( - t = tryDereference(v) - p = path.Join(args...) - ) - mu.Lock() - defer mu.Unlock() - if et, ok := registry[t]; ok { - if et != p { - panic(errors.Errorf("type registered with alternate path %q != %q", et, p)) - } - return - } - registry[t] = p -} - -// TypeURL returns the type url for a registered type. 
-func TypeURL(v interface{}) (string, error) { - mu.RLock() - u, ok := registry[tryDereference(v)] - mu.RUnlock() - if !ok { - // fallback to the proto registry if it is a proto message - pb, ok := v.(proto.Message) - if !ok { - return "", errors.Wrapf(ErrNotFound, "type %s", reflect.TypeOf(v)) - } - return proto.MessageName(pb), nil - } - return u, nil -} - -// Is returns true if the type of the Any is the same as v. -func Is(any *types.Any, v interface{}) bool { - // call to check that v is a pointer - tryDereference(v) - url, err := TypeURL(v) - if err != nil { - return false - } - return any.TypeUrl == url -} - -// MarshalAny marshals the value v into an any with the correct TypeUrl. -// If the provided object is already a proto.Any message, then it will be -// returned verbatim. If it is of type proto.Message, it will be marshaled as a -// protocol buffer. Otherwise, the object will be marshaled to json. -func MarshalAny(v interface{}) (*types.Any, error) { - var marshal func(v interface{}) ([]byte, error) - switch t := v.(type) { - case *types.Any: - // avoid reserializing the type if we have an any. - return t, nil - case proto.Message: - marshal = func(v interface{}) ([]byte, error) { - return proto.Marshal(t) - } - default: - marshal = json.Marshal - } - - url, err := TypeURL(v) - if err != nil { - return nil, err - } - - data, err := marshal(v) - if err != nil { - return nil, err - } - return &types.Any{ - TypeUrl: url, - Value: data, - }, nil -} - -// UnmarshalAny unmarshals the any type into a concrete type. -func UnmarshalAny(any *types.Any) (interface{}, error) { - return UnmarshalByTypeURL(any.TypeUrl, any.Value) -} - -// UnmarshalByTypeURL unmarshals the given type and value to into a concrete type. -func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) { - return unmarshal(typeURL, value, nil) -} - -// UnmarshalTo unmarshals the any type into a concrete type passed in the out -// argument. It is identical to UnmarshalAny, but lets clients provide a -// destination type through the out argument. -func UnmarshalTo(any *types.Any, out interface{}) error { - return UnmarshalToByTypeURL(any.TypeUrl, any.Value, out) -} - -// UnmarshalTo unmarshals the given type and value into a concrete type passed -// in the out argument. It is identical to UnmarshalByTypeURL, but lets clients -// provide a destination type through the out argument. 
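The typeurl API documented above (Register, MarshalAny, UnmarshalTo) composes into a simple round trip. A minimal sketch, assuming a hypothetical Foo type registered under an illustrative "example.com" prefix (neither appears in the patch):

```go
package main

import (
	"fmt"

	"github.com/containerd/typeurl"
)

type Foo struct {
	Field1 string
	Field2 string
}

func init() {
	// Register Foo under a URL path so typeurl knows how to (un)marshal it.
	typeurl.Register(&Foo{}, "example.com", "Foo")
}

func main() {
	// Foo is not a proto.Message, so it is marshaled as JSON.
	anyFoo, err := typeurl.MarshalAny(&Foo{Field1: "a", Field2: "b"})
	if err != nil {
		panic(err)
	}
	// Unmarshal into a caller-supplied destination instead of letting the
	// registry allocate one.
	var out Foo
	if err := typeurl.UnmarshalTo(anyFoo, &out); err != nil {
		panic(err)
	}
	fmt.Println(anyFoo.TypeUrl, out.Field1) // example.com/Foo a
}
```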
-func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error { - _, err := unmarshal(typeURL, value, out) - return err -} - -func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) { - t, err := getTypeByUrl(typeURL) - if err != nil { - return nil, err - } - - if v == nil { - v = reflect.New(t.t).Interface() - } else { - // Validate interface type provided by client - vURL, err := TypeURL(v) - if err != nil { - return nil, err - } - if typeURL != vURL { - return nil, errors.Errorf("can't unmarshal type %q to output %q", typeURL, vURL) - } - } - - if t.isProto { - err = proto.Unmarshal(value, v.(proto.Message)) - } else { - err = json.Unmarshal(value, v) - } - - return v, err -} - -type urlType struct { - t reflect.Type - isProto bool -} - -func getTypeByUrl(url string) (urlType, error) { - mu.RLock() - for t, u := range registry { - if u == url { - mu.RUnlock() - return urlType{ - t: t, - }, nil - } - } - mu.RUnlock() - // fallback to proto registry - t := proto.MessageType(url) - if t != nil { - return urlType{ - // get the underlying Elem because proto returns a pointer to the type - t: t.Elem(), - isProto: true, - }, nil - } - return urlType{}, errors.Wrapf(ErrNotFound, "type with url %s", url) -} - -func tryDereference(v interface{}) reflect.Type { - t := reflect.TypeOf(v) - if t.Kind() == reflect.Ptr { - // require check of pointer but dereference to register - return t.Elem() - } - panic("v is not a pointer to a type") -} diff --git a/vendor/github.com/distribution/reference/.gitattributes b/vendor/github.com/distribution/reference/.gitattributes new file mode 100644 index 0000000000..d207b1802b --- /dev/null +++ b/vendor/github.com/distribution/reference/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf diff --git a/vendor/github.com/distribution/reference/.gitignore b/vendor/github.com/distribution/reference/.gitignore new file mode 100644 index 0000000000..dc07e6b04a --- /dev/null +++ b/vendor/github.com/distribution/reference/.gitignore @@ -0,0 +1,2 @@ +# Cover profiles +*.out diff --git a/vendor/github.com/distribution/reference/.golangci.yml b/vendor/github.com/distribution/reference/.golangci.yml new file mode 100644 index 0000000000..793f0bb7ec --- /dev/null +++ b/vendor/github.com/distribution/reference/.golangci.yml @@ -0,0 +1,18 @@ +linters: + enable: + - bodyclose + - dupword # Checks for duplicate words in the source code + - gofmt + - goimports + - ineffassign + - misspell + - revive + - staticcheck + - unconvert + - unused + - vet + disable: + - errcheck + +run: + deadline: 2m diff --git a/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md b/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md new file mode 100644 index 0000000000..48f6704c6d --- /dev/null +++ b/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +We follow the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). + +Please contact the [CNCF Code of Conduct Committee](mailto:conduct@cncf.io) in order to report violations of the Code of Conduct. 
diff --git a/vendor/github.com/distribution/reference/CONTRIBUTING.md b/vendor/github.com/distribution/reference/CONTRIBUTING.md new file mode 100644 index 0000000000..ab21946656 --- /dev/null +++ b/vendor/github.com/distribution/reference/CONTRIBUTING.md @@ -0,0 +1,114 @@ +# Contributing to the reference library + +## Community help + +If you need help, please ask in the [#distribution](https://cloud-native.slack.com/archives/C01GVR8SY4R) channel on CNCF community slack. +[Click here for an invite to the CNCF community slack](https://slack.cncf.io/) + +## Reporting security issues + +The maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[cncf-distribution-security@lists.cncf.io](mailto:cncf-distribution-security@lists.cncf.io). + +## Reporting an issue properly + +By following these simple rules you will get better and faster feedback on your issue. + + - search the bugtracker for an already reported issue + +### If you found an issue that describes your problem: + + - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments + - please refrain from adding "same thing here" or "+1" comments + - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button + - comment if you have some new, technical and relevant information to add to the case + - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. + +### If you have not found an existing issue that describes your problem: + + 1. create a new issue, with a succinct title that describes your issue: + - bad title: "It doesn't work with my docker" + - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" + 2. copy the output of (or similar for other container tools): + - `docker version` + - `docker info` + - `docker exec registry --version` + 3. copy the command line you used to launch your Registry + 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) + 5. reproduce your problem and get your docker daemon logs showing the error + 6. if relevant, copy your registry logs that show the error + 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) + 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry + +## Contributing Code + +Contributions should be made via pull requests. Pull requests will be reviewed +by one or more maintainers or reviewers and merged when acceptable. + +You should follow the basic GitHub workflow: + + 1. Use your own [fork](https://help.github.com/en/articles/about-forks) + 2. Create your [change](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes) + 3. Test your code + 4. [Commit](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) your work, always [sign your commits](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) + 5. 
Push your change to your fork and create a [Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork) + +Refer to [containerd's contribution guide](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes) +for tips on creating a successful contribution. + +## Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/distribution/reference/GOVERNANCE.md b/vendor/github.com/distribution/reference/GOVERNANCE.md new file mode 100644 index 0000000000..200045b050 --- /dev/null +++ b/vendor/github.com/distribution/reference/GOVERNANCE.md @@ -0,0 +1,144 @@ +# distribution/reference Project Governance + +Distribution [Code of Conduct](./CODE-OF-CONDUCT.md) can be found here. + +For specific guidance on practical contribution steps please +see our [CONTRIBUTING.md](./CONTRIBUTING.md) guide. + +## Maintainership + +There are different types of maintainers, with different responsibilities, but +all maintainers have 3 things in common: + +1) They share responsibility in the project's success. +2) They have made a long-term, recurring time investment to improve the project. +3) They spend that time doing whatever needs to be done, not necessarily what +is the most interesting or fun. + +Maintainers are often under-appreciated, because their work is harder to appreciate. +It's easy to appreciate a really cool and technically advanced feature. 
It's harder +to appreciate the absence of bugs, the slow but steady improvement in stability, +or the reliability of a release process. But those things distinguish a good +project from a great one. + +## Reviewers + +A reviewer is a core role within the project. +They share in reviewing issues and pull requests and their LGTM counts towards the +required LGTM count to merge a code change into the project. + +Reviewers are part of the organization but do not have write access. +Becoming a reviewer is a core aspect in the journey to becoming a maintainer. + +## Adding maintainers + +Maintainers are first and foremost contributors that have shown they are +committed to the long term success of a project. Contributors wanting to become +maintainers are expected to be deeply involved in contributing code, pull +request review, and triage of issues in the project for more than three months. + +Just contributing does not make you a maintainer, it is about building trust +with the current maintainers of the project and being a person that they can +depend on and trust to make decisions in the best interest of the project. + +Periodically, the existing maintainers curate a list of contributors that have +shown regular activity on the project over the prior months. From this list, +maintainer candidates are selected and proposed in a pull request or a +maintainers communication channel. + +After a candidate has been announced to the maintainers, the existing +maintainers are given five business days to discuss the candidate, raise +objections and cast their vote. Votes may take place on the communication +channel or via pull request comment. Candidates must be approved by at least 66% +of the current maintainers by adding their vote on the mailing list. The +reviewer role has the same process but only requires 33% of current maintainers. +Only maintainers of the repository that the candidate is proposed for are +allowed to vote. + +If a candidate is approved, a maintainer will contact the candidate to invite +the candidate to open a pull request that adds the contributor to the +MAINTAINERS file. The voting process may take place inside a pull request if a +maintainer has already discussed the candidacy with the candidate and a +maintainer is willing to be a sponsor by opening the pull request. The candidate +becomes a maintainer once the pull request is merged. + +## Stepping down policy + +Life priorities, interests, and passions can change. If you're a maintainer but +feel you must remove yourself from the list, inform other maintainers that you +intend to step down, and if possible, help find someone to pick up your work. +At the very least, ensure your work can be continued where you left off. + +After you've informed other maintainers, create a pull request to remove +yourself from the MAINTAINERS file. + +## Removal of inactive maintainers + +Similar to the procedure for adding new maintainers, existing maintainers can +be removed from the list if they do not show significant activity on the +project. Periodically, the maintainers review the list of maintainers and their +activity over the last three months. + +If a maintainer has shown insufficient activity over this period, a neutral +person will contact the maintainer to ask if they want to continue being +a maintainer. If the maintainer decides to step down as a maintainer, they +open a pull request to be removed from the MAINTAINERS file. 
+ +If the maintainer wants to remain a maintainer, but is unable to perform the +required duties they can be removed with a vote of at least 66% of the current +maintainers. In this case, maintainers should first propose the change to +maintainers via the maintainers communication channel, then open a pull request +for voting. The voting period is five business days. The voting pull request +should not come as a surpise to any maintainer and any discussion related to +performance must not be discussed on the pull request. + +## How are decisions made? + +Docker distribution is an open-source project with an open design philosophy. +This means that the repository is the source of truth for EVERY aspect of the +project, including its philosophy, design, road map, and APIs. *If it's part of +the project, it's in the repo. If it's in the repo, it's part of the project.* + +As a result, all decisions can be expressed as changes to the repository. An +implementation change is a change to the source code. An API change is a change +to the API specification. A philosophy change is a change to the philosophy +manifesto, and so on. + +All decisions affecting distribution, big and small, follow the same 3 steps: + +* Step 1: Open a pull request. Anyone can do this. + +* Step 2: Discuss the pull request. Anyone can do this. + +* Step 3: Merge or refuse the pull request. Who does this depends on the nature +of the pull request and which areas of the project it affects. + +## Helping contributors with the DCO + +The [DCO or `Sign your work`](./CONTRIBUTING.md#sign-your-work) +requirement is not intended as a roadblock or speed bump. + +Some contributors are not as familiar with `git`, or have used a web +based editor, and thus asking them to `git commit --amend -s` is not the best +way forward. + +In this case, maintainers can update the commits based on clause (c) of the DCO. +The most trivial way for a contributor to allow the maintainer to do this, is to +add a DCO signature in a pull requests's comment, or a maintainer can simply +note that the change is sufficiently trivial that it does not substantially +change the existing contribution - i.e., a spelling change. + +When you add someone's DCO, please also add your own to keep a log. + +## I'm a maintainer. Should I make pull requests too? + +Yes. Nobody should ever push to master directly. All changes should be +made through a pull request. + +## Conflict Resolution + +If you have a technical dispute that you feel has reached an impasse with a +subset of the community, any contributor may open an issue, specifically +calling for a resolution vote of the current core maintainers to resolve the +dispute. The same voting quorums required (2/3) for adding and removing +maintainers will apply to conflict resolution. diff --git a/vendor/github.com/rootless-containers/rootlesskit/LICENSE b/vendor/github.com/distribution/reference/LICENSE similarity index 99% rename from vendor/github.com/rootless-containers/rootlesskit/LICENSE rename to vendor/github.com/distribution/reference/LICENSE index d645695673..e06d208186 100644 --- a/vendor/github.com/rootless-containers/rootlesskit/LICENSE +++ b/vendor/github.com/distribution/reference/LICENSE @@ -1,5 +1,4 @@ - - Apache License +Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -179,7 +178,7 @@ APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -187,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -200,3 +199,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + diff --git a/vendor/github.com/distribution/reference/MAINTAINERS b/vendor/github.com/distribution/reference/MAINTAINERS new file mode 100644 index 0000000000..9e0a60c8bd --- /dev/null +++ b/vendor/github.com/distribution/reference/MAINTAINERS @@ -0,0 +1,26 @@ +# Distribution project maintainers & reviewers +# +# See GOVERNANCE.md for maintainer versus reviewer roles +# +# MAINTAINERS (cncf-distribution-maintainers@lists.cncf.io) +# GitHub ID, Name, Email address +"chrispat","Chris Patterson","chrispat@github.com" +"clarkbw","Bryan Clark","clarkbw@github.com" +"corhere","Cory Snider","csnider@mirantis.com" +"deleteriousEffect","Hayley Swimelar","hswimelar@gitlab.com" +"heww","He Weiwei","hweiwei@vmware.com" +"joaodrp","João Pereira","jpereira@gitlab.com" +"justincormack","Justin Cormack","justin.cormack@docker.com" +"squizzi","Kyle Squizzato","ksquizzato@mirantis.com" +"milosgajdos","Milos Gajdos","milosthegajdos@gmail.com" +"sargun","Sargun Dhillon","sargun@sargun.me" +"wy65701436","Wang Yan","wangyan@vmware.com" +"stevelasker","Steve Lasker","steve.lasker@microsoft.com" +# +# REVIEWERS +# GitHub ID, Name, Email address +"dmcgowan","Derek McGowan","derek@mcgstyle.net" +"stevvooe","Stephen Day","stevvooe@gmail.com" +"thajeztah","Sebastiaan van Stijn","github@gone.nl" +"DavidSpek", "David van der Spek", "vanderspek.david@gmail.com" +"Jamstah", "James Hewitt", "james.hewitt@gmail.com" diff --git a/vendor/github.com/distribution/reference/Makefile b/vendor/github.com/distribution/reference/Makefile new file mode 100644 index 0000000000..c78576b75d --- /dev/null +++ b/vendor/github.com/distribution/reference/Makefile @@ -0,0 +1,25 @@ +# Project packages. +PACKAGES=$(shell go list ./...) + +# Flags passed to `go test` +BUILDFLAGS ?= +TESTFLAGS ?= + +.PHONY: all build test coverage +.DEFAULT: all + +all: build + +build: ## no binaries to build, so just check compilation suceeds + go build ${BUILDFLAGS} ./... + +test: ## run tests + go test ${TESTFLAGS} ./... + +coverage: ## generate coverprofiles from the unit tests + rm -f coverage.txt + go test ${TESTFLAGS} -cover -coverprofile=cover.out ./... 
+ +.PHONY: help +help: + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_\/%-]+:.*?##/ { printf " \033[36m%-27s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/vendor/github.com/distribution/reference/README.md b/vendor/github.com/distribution/reference/README.md new file mode 100644 index 0000000000..e2531e49c4 --- /dev/null +++ b/vendor/github.com/distribution/reference/README.md @@ -0,0 +1,30 @@ +# Distribution reference + +Go library to handle references to container images. + + + +[![Build Status](https://github.com/distribution/reference/actions/workflows/test.yml/badge.svg?branch=main&event=push)](https://github.com/distribution/reference/actions?query=workflow%3ACI) +[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/distribution/reference) +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache--2.0-blue.svg)](LICENSE) +[![codecov](https://codecov.io/gh/distribution/reference/branch/main/graph/badge.svg)](https://codecov.io/gh/distribution/reference) +[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference.svg?type=shield)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield) + +This repository contains a library for handling refrences to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details. + +## Contribution + +Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute +issues, fixes, and patches to this project. + +## Communication + +For async communication and long running discussions please use issues and pull requests on the github repo. +This will be the best place to discuss design and implementation. + +For sync communication we have a #distribution channel in the [CNCF Slack](https://slack.cncf.io/) +that everyone is welcome to join and chat about development. + +## Licenses + +The distribution codebase is released under the [Apache 2.0 license](LICENSE). diff --git a/vendor/github.com/distribution/reference/SECURITY.md b/vendor/github.com/distribution/reference/SECURITY.md new file mode 100644 index 0000000000..aaf983c0f0 --- /dev/null +++ b/vendor/github.com/distribution/reference/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +## Reporting a Vulnerability + +The maintainers take security seriously. If you discover a security issue, please bring it to their attention right away! + +Please DO NOT file a public issue, instead send your report privately to cncf-distribution-security@lists.cncf.io. diff --git a/vendor/github.com/distribution/reference/distribution-logo.svg b/vendor/github.com/distribution/reference/distribution-logo.svg new file mode 100644 index 0000000000..cc9f4073b9 --- /dev/null +++ b/vendor/github.com/distribution/reference/distribution-logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/vendor/github.com/distribution/reference/helpers.go b/vendor/github.com/distribution/reference/helpers.go new file mode 100644 index 0000000000..d10c7ef838 --- /dev/null +++ b/vendor/github.com/distribution/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. 
+func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See [path.Match] for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/distribution/reference/normalize.go b/vendor/github.com/distribution/reference/normalize.go new file mode 100644 index 0000000000..a30229d01b --- /dev/null +++ b/vendor/github.com/distribution/reference/normalize.go @@ -0,0 +1,224 @@ +package reference + +import ( + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // legacyDefaultDomain is the legacy domain for Docker Hub (which was + // originally named "the Docker Index"). This domain is still used for + // authentication and image search, which were part of the "v1" Docker + // registry specification. + // + // This domain will continue to be supported, but there are plans to consolidate + // legacy domains to new "canonical" domains. Once those domains are decided + // on, we must update the normalization functions, but preserve compatibility + // with existing installs, clients, and user configuration. + legacyDefaultDomain = "index.docker.io" + + // defaultDomain is the default domain used for images on Docker Hub. + // It is used to normalize "familiar" names to canonical names, for example, + // to convert "ubuntu" to "docker.io/library/ubuntu:latest". + // + // Note that actual domain of Docker Hub's registry is registry-1.docker.io. + // This domain will continue to be supported, but there are plans to consolidate + // legacy domains to new "canonical" domains. Once those domains are decided + // on, we must update the normalization functions, but preserve compatibility + // with existing installs, clients, and user configuration. + defaultDomain = "docker.io" + + // officialRepoPrefix is the namespace used for official images on Docker Hub. + // It is used to normalize "familiar" names to canonical names, for example, + // to convert "ubuntu" to "docker.io/library/ubuntu:latest". + officialRepoPrefix = "library/" + + // defaultTag is the default tag if no tag is provided. + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". +type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. If the value may be an identifier +// use ParseAnyReference. 
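The familiar-name helpers above pair naturally with ParseNormalizedNamed (whose documentation precedes its definition just below) to move between canonical and UI-friendly forms. A minimal sketch, assuming the package is imported as reference:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// "ubuntu" is a familiar name; normalization fills in the default
	// domain and the "library/" namespace used for official images.
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.Name())                    // docker.io/library/ubuntu
	fmt.Println(reference.FamiliarString(named)) // ubuntu
	fmt.Println(reference.IsNameOnly(named))     // true: no tag or digest yet
}
```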
+func ParseNormalizedNamed(s string) (Named, error) { + if ok := anchoredIdentifierRegexp.MatchString(s); ok { + return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) + } + domain, remainder := splitDockerDomain(s) + var remote string + if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { + remote = remainder[:tagSep] + } else { + remote = remainder + } + if strings.ToLower(remote) != remote { + return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remote) + } + + ref, err := Parse(domain + "/" + remainder) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// namedTaggedDigested is a reference that has both a tag and a digest. +type namedTaggedDigested interface { + NamedTagged + Digested +} + +// ParseDockerRef normalizes the image reference following the docker convention, +// which allows for references to contain both a tag and a digest. It returns a +// reference that is either tagged or digested. For references containing both +// a tag and a digest, it returns a digested reference. For example, the following +// reference: +// +// docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa +// +// Is returned as a digested reference (with the ":latest" tag removed): +// +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa +// +// References that are already "tagged" or "digested" are returned unmodified: +// +// // Already a digested reference +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa +// +// // Already a named reference +// docker.io/library/busybox:latest +func ParseDockerRef(ref string) (Named, error) { + named, err := ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if canonical, ok := named.(namedTaggedDigested); ok { + // The reference is both tagged and digested; only return digested. + newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err + } + return WithDigest(newNamed, canonical.Digest()) + } + return TagNameOnly(named), nil +} + +// splitDockerDomain splits a repository name to domain and remote-name. +// If no valid domain is found, the default domain is used. Repository name +// needs to be already validated before. +func splitDockerDomain(name string) (domain, remainder string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != localhost && strings.ToLower(name[:i]) == name[:i]) { + domain, remainder = defaultDomain, name + } else { + domain, remainder = name[:i], name[i+1:] + } + if domain == legacyDefaultDomain { + domain = defaultDomain + } + if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { + remainder = officialRepoPrefix + remainder + } + return +} + +// familiarizeName returns a shortened version of the name familiar +// to the Docker UI. Familiar names have the default domain +// "docker.io" and "library/" repository prefix removed. +// For example, "docker.io/library/redis" will have the familiar +// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". +// Returns a familiarized named only reference. 
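ParseDockerRef, documented above, is the variant most callers want when an image reference may carry both a tag and a digest: the digest wins and the tag is dropped. A short sketch continuing inside the main function of the previous example:

```go
// ParseDockerRef keeps the digest and drops the tag when both are present.
ref, err := reference.ParseDockerRef(
	"docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
if err != nil {
	panic(err)
}
fmt.Println(ref.String())
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa

// A bare name gets the default ":latest" tag instead.
ref, _ = reference.ParseDockerRef("busybox")
fmt.Println(ref.String()) // docker.io/library/busybox:latest
```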
+func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if strings.HasPrefix(repo.path, officialRepoPrefix) { + // TODO(thaJeztah): this check may be too strict, as it assumes the + // "library/" namespace does not have nested namespaces. While this + // is true (currently), technically it would be possible for Docker + // Hub to use those (e.g. "library/distros/ubuntu:latest"). + // See https://github.com/distribution/distribution/pull/3769#issuecomment-1302031785. + if remainder := strings.TrimPrefix(repo.path, officialRepoPrefix); !strings.ContainsRune(remainder, '/') { + repo.path = remainder + } + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/distribution/reference/reference.go b/vendor/github.com/distribution/reference/reference.go new file mode 100644 index 0000000000..e98c44daa2 --- /dev/null +++ b/vendor/github.com/distribution/reference/reference.go @@ -0,0 +1,436 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] remote-name +// domain := host [':' port-number] +// host := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A +// domain-name := domain-component ['.' 
domain-component]* +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// path (or "remote-name") := path-component ['/' path-component]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. + ErrNameContainsUppercase = errors.New("repository name must be lowercase") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + + // ErrNameNotCanonical is returned when a name is not canonical. + ErrNameNotCanonical = errors.New("repository name must be canonical") +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. 
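The Field wrapper above exists so references can be embedded in encoded structures; because it implements MarshalText and UnmarshalText, encoding/json handles it transparently. A minimal sketch (the manifest struct is illustrative, not part of the library):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/distribution/reference"
)

// manifest is an illustrative struct that stores a reference as text.
type manifest struct {
	Image reference.Field `json:"image"`
}

func main() {
	ref, err := reference.Parse("docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(manifest{Image: reference.AsField(ref)})
	fmt.Println(string(data)) // {"image":"docker.io/library/busybox:latest"}

	var m manifest
	_ = json.Unmarshal(data, &m)
	fmt.Println(m.Image.Reference().String()) // docker.io/library/busybox:latest
}
```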
+func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with domain and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the [Named] reference. +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the [Named] reference. +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// +// Deprecated: Use [Domain] or [Path]. +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. 
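Parse is the strict, non-normalizing entry point: it accepts anything the grammar at the top of the package allows, without filling in docker.io defaults, and Domain and Path split the result. A short sketch, continuing with the same reference import inside a main function:

```go
ref, err := reference.Parse("localhost:5000/my/app:v1.2.3")
if err != nil {
	panic(err)
}
if named, ok := ref.(reference.Named); ok {
	fmt.Println(reference.Domain(named)) // localhost:5000
	fmt.Println(reference.Path(named))   // my/app
}
if tagged, ok := ref.(reference.Tagged); ok {
	fmt.Println(tagged.Tag()) // v1.2.3
}
```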
+func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. 
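WithName, WithTag, and WithDigest build references up piece by piece, and TrimNamed (documented above) strips the extras back off. A minimal sketch under the same import assumption:

```go
named, err := reference.WithName("docker.io/library/busybox")
if err != nil {
	panic(err)
}
tagged, _ := reference.WithTag(named, "1.36")
fmt.Println(tagged.String()) // docker.io/library/busybox:1.36

canonical, _ := reference.WithDigest(tagged,
	"sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
fmt.Println(canonical.String())
// docker.io/library/busybox:1.36@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa

// TrimNamed drops both the tag and the digest again.
fmt.Println(reference.TrimNamed(canonical).String()) // docker.io/library/busybox
```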
+func TrimNamed(ref Named) Named { + repo := repository{} + if r, ok := ref.(namedRepository); ok { + repo.domain, repo.path = r.Domain(), r.Path() + } else { + repo.domain, repo.path = splitDomain(ref.Name()) + } + return repo +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/distribution/reference/regexp.go b/vendor/github.com/distribution/reference/regexp.go new file mode 100644 index 0000000000..65bc49d79b --- /dev/null +++ b/vendor/github.com/distribution/reference/regexp.go @@ -0,0 +1,163 @@ +package reference + +import ( + "regexp" + "strings" +) + +// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:"). +var DigestRegexp = regexp.MustCompile(digestPat) + +// DomainRegexp matches hostname or IP-addresses, optionally including a port +// number. It defines the structure of potential domain components that may be +// part of image names. This is purposely a subset of what is allowed by DNS to +// ensure backwards compatibility with Docker image names. It may be a subset of +// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between +// square brackets (excluding zone identifiers as defined by [RFC 6874] or special +// addresses such as IPv4-Mapped). +// +// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874. +var DomainRegexp = regexp.MustCompile(domainAndPort) + +// IdentifierRegexp is the format for string identifier used as a +// content addressable identifier using sha256. These identifiers +// are like digests without the algorithm, since sha256 is used. 
+var IdentifierRegexp = regexp.MustCompile(identifier) + +// NameRegexp is the format for the name component of references, including +// an optional domain and port, but without tag or digest suffix. +var NameRegexp = regexp.MustCompile(namePat) + +// ReferenceRegexp is the full supported format of a reference. The regexp +// is anchored and has capturing groups for name, tag, and digest +// components. +var ReferenceRegexp = regexp.MustCompile(referencePat) + +// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go]. +// +// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28 +var TagRegexp = regexp.MustCompile(tag) + +const ( + // alphanumeric defines the alphanumeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphanumeric = `[a-z0-9]+` + + // separator defines the separators allowed to be embedded in name + // components. This allows one period, one or two underscore and multiple + // dashes. Repeated dashes and underscores are intentionally treated + // differently. In order to support valid hostnames as name components, + // supporting repeated dash was added. Additionally double underscore is + // now allowed as a separator to loosen the restriction for previously + // supported names. + separator = `(?:[._]|__|[-]+)` + + // localhost is treated as a special value for domain-name. Any other + // domain-name without a "." or a ":port" are considered a path component. + localhost = `localhost` + + // domainNameComponent restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp. + domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])` + + // optionalPort matches an optional port-number including the port separator + // (e.g. ":80"). + optionalPort = `(?::[0-9]+)?` + + // tag matches valid tag names. From docker/docker:graph/tags.go. + tag = `[\w][\w.-]{0,127}` + + // digestPat matches well-formed digests, including algorithm (e.g. "sha256:"). + // + // TODO(thaJeztah): this should follow the same rules as https://pkg.go.dev/github.com/opencontainers/go-digest@v1.0.0#DigestRegexp + // so that go-digest defines the canonical format. Note that the go-digest is + // more relaxed: + // - it allows multiple algorithms (e.g. "sha256+b64:") to allow + // future expansion of supported algorithms. + // - it allows the "" value to use urlsafe base64 encoding as defined + // in [rfc4648, section 5]. + // + // [rfc4648, section 5]: https://www.rfc-editor.org/rfc/rfc4648#section-5. + digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}` + + // identifier is the format for a content addressable identifier using sha256. + // These identifiers are like digests without the algorithm, since sha256 is used. + identifier = `([a-f0-9]{64})` + + // ipv6address are enclosed between square brackets and may be represented + // in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format + // are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as + // IPv4-Mapped are deliberately excluded. + ipv6address = `\[(?:[a-fA-F0-9:]+)\]` +) + +var ( + // domainName defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. This includes IPv4 addresses on decimal format. 
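The exported expressions above can also be used directly for lightweight validation or splitting without constructing a Reference; note that ReferenceRegexp is anchored and captures the name, tag, and digest groups. A small sketch, continuing with the same import:

```go
// Groups: m[1] = name, m[2] = tag, m[3] = digest (empty when absent).
m := reference.ReferenceRegexp.FindStringSubmatch("localhost:5000/my/app:v1.2.3")
fmt.Println(m[1]) // localhost:5000/my/app
fmt.Println(m[2]) // v1.2.3
fmt.Println(m[3]) // (empty: no digest in the input)

fmt.Println(reference.DigestRegexp.MatchString(
	"sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")) // true
```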
+ domainName = domainNameComponent + anyTimes(`\.`+domainNameComponent) + + // host defines the structure of potential domains based on the URI + // Host subcomponent on rfc3986. It may be a subset of DNS domain name, + // or an IPv4 address in decimal format, or an IPv6 address between square + // brackets (excluding zone identifiers as defined by rfc6874 or special + // addresses such as IPv4-Mapped). + host = `(?:` + domainName + `|` + ipv6address + `)` + + // allowed by the URI Host subcomponent on rfc3986 to ensure backwards + // compatibility with Docker image names. + domainAndPort = host + optionalPort + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = regexp.MustCompile(anchored(tag)) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = regexp.MustCompile(anchored(digestPat)) + + // pathComponent restricts path-components to start with an alphanumeric + // character, with following parts able to be separated by a separator + // (one period, one or two underscore and multiple dashes). + pathComponent = alphanumeric + anyTimes(separator+alphanumeric) + + // remoteName matches the remote-name of a repository. It consists of one + // or more forward slash (/) delimited path-components: + // + // pathComponent[[/pathComponent] ...] // e.g., "library/ubuntu" + remoteName = pathComponent + anyTimes(`/`+pathComponent) + namePat = optional(domainAndPort+`/`) + remoteName + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = regexp.MustCompile(anchored(optional(capture(domainAndPort), `/`), capture(remoteName))) + + referencePat = anchored(capture(namePat), optional(`:`, capture(tag)), optional(`@`, capture(digestPat))) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = regexp.MustCompile(anchored(identifier)) +) + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...string) string { + return `(?:` + strings.Join(res, "") + `)?` +} + +// anyTimes wraps the expression in a non-capturing group that can occur +// any number of times. +func anyTimes(res ...string) string { + return `(?:` + strings.Join(res, "") + `)*` +} + +// capture wraps the expression in a capturing group. +func capture(res ...string) string { + return `(` + strings.Join(res, "") + `)` +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...string) string { + return `^` + strings.Join(res, "") + `$` +} diff --git a/vendor/github.com/distribution/reference/sort.go b/vendor/github.com/distribution/reference/sort.go new file mode 100644 index 0000000000..416c37b076 --- /dev/null +++ b/vendor/github.com/distribution/reference/sort.go @@ -0,0 +1,75 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package reference + +import ( + "sort" +) + +// Sort sorts string references preferring higher information references. +// +// The precedence is as follows: +// +// 1. [Named] + [Tagged] + [Digested] (e.g., "docker.io/library/busybox:latest@sha256:") +// 2. [Named] + [Tagged] (e.g., "docker.io/library/busybox:latest") +// 3. [Named] + [Digested] (e.g., "docker.io/library/busybo@sha256:") +// 4. [Named] (e.g., "docker.io/library/busybox") +// 5. [Digested] (e.g., "docker.io@sha256:") +// 6. Parse error +func Sort(references []string) []string { + var prefs []Reference + var bad []string + + for _, ref := range references { + pref, err := ParseAnyReference(ref) + if err != nil { + bad = append(bad, ref) + } else { + prefs = append(prefs, pref) + } + } + sort.Slice(prefs, func(a, b int) bool { + ar := refRank(prefs[a]) + br := refRank(prefs[b]) + if ar == br { + return prefs[a].String() < prefs[b].String() + } + return ar < br + }) + sort.Strings(bad) + var refs []string + for _, pref := range prefs { + refs = append(refs, pref.String()) + } + return append(refs, bad...) +} + +func refRank(ref Reference) uint8 { + if _, ok := ref.(Named); ok { + if _, ok = ref.(Tagged); ok { + if _, ok = ref.(Digested); ok { + return 1 + } + return 2 + } + if _, ok = ref.(Digested); ok { + return 3 + } + return 4 + } + return 5 +} diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index b314181925..48d04f9a98 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -27,6 +27,7 @@ Adam Miller Adam Mills Adam Pointer Adam Singer +Adam Thornton Adam Walz Adam Williams AdamKorcz @@ -173,6 +174,7 @@ Andy Rothfusz Andy Smith Andy Wilson Andy Zhang +Aneesh Kulkarni Anes Hasicic Angel Velazquez Anil Belur @@ -236,6 +238,7 @@ Ben Golub Ben Gould Ben Hall Ben Langfeld +Ben Lovy Ben Sargent Ben Severson Ben Toews @@ -262,7 +265,7 @@ Billy Ridgway Bily Zhang Bin Liu Bingshen Wang -Bjorn Neergaard +Bjorn Neergaard Blake Geno Boaz Shuster bobby abbott @@ -279,6 +282,7 @@ Brandon Liu Brandon Philips Brandon Rhodes Brendan Dixon +Brennan Kinney <5098581+polarathene@users.noreply.github.com> Brent Salisbury Brett Higgins Brett Kochendorfer @@ -363,6 +367,7 @@ chenyuzhu Chetan Birajdar Chewey Chia-liang Kao +Chiranjeevi Tirunagari chli Cholerae Hu Chris Alfonso @@ -433,8 +438,8 @@ Cristian Staretu cristiano balducci Cristina Yenyxe Gonzalez Garcia Cruceru Calin-Cristian +cui fliter CUI Wei -cuishuang Cuong Manh Le Cyprian Gracz Cyril F @@ -513,6 +518,7 @@ David Dooling David Gageot David Gebler David Glasser +David Karlsson <35727626+dvdksn@users.noreply.github.com> David Lawrence David Lechner David M. Karr @@ -602,6 +608,7 @@ Donald Huang Dong Chen Donghwa Kim Donovan Jones +Dorin Geman Doron Podoleanu Doug Davis Doug MacEachern @@ -636,6 +643,7 @@ Emily Rose Emir Ozer Eng Zer Jun Enguerran +Enrico Weigelt, metux IT consult Eohyung Lee epeterso er0k @@ -676,6 +684,7 @@ Evan Allrich Evan Carmi Evan Hazlett Evan Krall +Evan Lezar Evan Phoenix Evan Wies Evelyn Xu @@ -744,6 +753,7 @@ Frank Groeneveld Frank Herrmann Frank Macreery Frank Rosquin +Frank Villaro-Dixon Frank Yang Fred Lifton Frederick F. 
Kautz IV @@ -983,6 +993,7 @@ Jean Rouge Jean-Baptiste Barth Jean-Baptiste Dalido Jean-Christophe Berthon +Jean-Michel Rouet Jean-Paul Calderone Jean-Pierre Huynh Jean-Tiare Le Bigot @@ -1013,6 +1024,7 @@ Jeroen Jacobs Jesse Dearing Jesse Dubay Jessica Frazelle +Jeyanthinath Muthuram Jezeniel Zapanta Jhon Honce Ji.Zhilong @@ -1141,6 +1153,7 @@ junxu Jussi Nummelin Justas Brazauskas Justen Martin +Justin Chadwell Justin Cormack Justin Force Justin Keller <85903732+jk-vb@users.noreply.github.com> @@ -1183,6 +1196,7 @@ Ke Xu Kei Ohmura Keith Hudgins Keli Hu +Ken Bannister Ken Cochrane Ken Herner Ken ICHIKAWA @@ -1192,7 +1206,7 @@ Kenjiro Nakayama Kent Johnson Kenta Tada Kevin "qwazerty" Houdebert -Kevin Alvarez +Kevin Alvarez Kevin Burke Kevin Clark Kevin Feyrer @@ -1225,6 +1239,7 @@ Konstantin Gribov Konstantin L Konstantin Pelykh Kostadin Plachkov +kpcyrd Krasi Georgiev Krasimir Georgiev Kris-Mikael Krister @@ -1306,6 +1321,7 @@ Lorenzo Fontana Lotus Fenn Louis Delossantos Louis Opter +Luboslav Pivarc Luca Favatella Luca Marturana Luca Orlandi @@ -1344,6 +1360,7 @@ Manuel Meurer Manuel Rüger Manuel Woelker mapk0y +Marat Radchenko Marc Abramowitz Marc Kuo Marc Tamsky @@ -1383,6 +1400,7 @@ Martijn van Oosterhout Martin Braun Martin Dojcak Martin Honermeyer +Martin Jirku Martin Kelly Martin Mosegaard Amdisen Martin Muzatko @@ -1461,6 +1479,7 @@ Michael Holzheu Michael Hudson-Doyle Michael Huettermann Michael Irwin +Michael Kebe Michael Kuehn Michael Käufl Michael Neale @@ -1509,10 +1528,11 @@ Mike Lundy Mike MacCana Mike Naberezny Mike Snitzer +Mike Sul mikelinjie <294893458@qq.com> Mikhail Sobolev Miklos Szegedi -Milas Bowman +Milas Bowman Milind Chawre Miloslav Trmač mingqing @@ -1524,6 +1544,7 @@ mlarcher Mohammad Banikazemi Mohammad Nasirifar Mohammed Aaqib Ansari +Mohd Sadiq Mohit Soni Moorthy RS Morgan Bauer @@ -1606,6 +1627,7 @@ Noah Treuhaft NobodyOnSE noducks Nolan Darilek +Nolan Miles Noriki Nakamura nponeccop Nurahmadie @@ -1661,6 +1683,7 @@ Paul Lietar Paul Liljenberg Paul Morie Paul Nasrat +Paul Seiffert Paul Weaver Paulo Gomes Paulo Ribeiro @@ -1674,6 +1697,7 @@ Pavlos Ratis Pavol Vargovcik Pawel Konczalski Paweł Gronowski +payall4u Peeyush Gupta Peggy Li Pei Su @@ -1703,7 +1727,9 @@ Phil Estes Phil Sphicas Phil Spitler Philip Alexander Etling +Philip K. Warren Philip Monroe +Philipp Fruck Philipp Gillé Philipp Wahala Philipp Weissensteiner @@ -1741,6 +1767,7 @@ Quentin Brossard Quentin Perez Quentin Tayssier r0n22 +Rachit Sharma Radostin Stoyanov Rafal Jeczalik Rafe Colton @@ -1773,6 +1800,7 @@ Rich Horwood Rich Moyse Rich Seymour Richard Burnison +Richard Hansen Richard Harvey Richard Mathie Richard Metzler @@ -1788,6 +1816,7 @@ Ritesh H Shukla Riyaz Faizullabhoy Rob Cowsill <42620235+rcowsill@users.noreply.github.com> Rob Gulewich +Rob Murray Rob Vesse Robert Bachmann Robert Bittle @@ -1869,6 +1898,7 @@ ryancooper7 RyanDeng Ryo Nakao Ryoga Saito +Régis Behmo Rémy Greinhofer s. rannou Sabin Basyal @@ -1885,6 +1915,7 @@ Sam J Sharpe Sam Neirinck Sam Reis Sam Rijs +Sam Thibault Sam Whited Sambuddha Basu Sami Wagiaalla @@ -1908,6 +1939,7 @@ Satoshi Tagomori Scott Bessler Scott Collier Scott Johnston +Scott Moser Scott Percival Scott Stamp Scott Walls @@ -1923,6 +1955,7 @@ Sebastiaan van Steenis Sebastiaan van Stijn Sebastian Höffner Sebastian Radloff +Sebastian Thomschke Sebastien Goasguen Senthil Kumar Selvaraj Senthil Kumaran @@ -1996,6 +2029,7 @@ Stanislav Bondarenko Stanislav Levin Steeve Morin Stefan Berger +Stefan Gehrig Stefan J. Wernli Stefan Praszalowicz Stefan S. 
@@ -2003,6 +2037,7 @@ Stefan Scherer Stefan Staudenmeyer Stefan Weil Steffen Butzer +Stephan Henningsen Stephan Spindler Stephen Benjamin Stephen Crosby @@ -2204,6 +2239,7 @@ Vinod Kulkarni Vishal Doshi Vishnu Kannan Vitaly Ostrosablin +Vitor Anjos Vitor Monteiro Vivek Agarwal Vivek Dasgupta @@ -2250,6 +2286,7 @@ Wenxuan Zhao Wenyu You <21551128@zju.edu.cn> Wenzhi Liang Wes Morgan +Wesley Pettit Wewang Xiaorenfine Wiktor Kwapisiewicz Will Dietz @@ -2289,7 +2326,7 @@ xiekeyang Ximo Guanter Gonzálbez xin.li Xinbo Weng -Xinfeng Liu +Xinfeng Liu Xinzi Zhou Xiuming Chen Xuecong Liao @@ -2355,6 +2392,7 @@ Zen Lin(Zhinan Lin) Zhang Kun Zhang Wei Zhang Wentao +zhangguanzhang ZhangHang zhangxianwei Zhenan Ye <21551168@zju.edu.cn> @@ -2381,6 +2419,7 @@ Zuhayr Elahi Zunayed Ali Álvaro Lázaro Átila Camurça Alves +吴小白 <296015668@qq.com> 尹吉峰 屈骏 徐俊杰 diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md index f136c3433a..381f19881f 100644 --- a/vendor/github.com/docker/docker/api/README.md +++ b/vendor/github.com/docker/docker/api/README.md @@ -37,6 +37,6 @@ There is hopefully enough example material in the file for you to copy a similar When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. -Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. +Run `make swagger-docs` and a preview will be running at `http://localhost:9000`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index cba66bc462..37e553d418 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of Current REST API - DefaultVersion = "1.43" + DefaultVersion = "1.44" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go deleted file mode 100644 index 19fc63d658..0000000000 --- a/vendor/github.com/docker/docker/api/common_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !windows -// +build !windows - -package api // import "github.com/docker/docker/api" - -// MinVersion represents Minimum REST API version supported -const MinVersion = "1.12" diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go deleted file mode 100644 index 590ba5479b..0000000000 --- a/vendor/github.com/docker/docker/api/common_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -package api // import "github.com/docker/docker/api" - -// MinVersion represents Minimum REST API version supported -// Technically the first daemon API version released on Windows is v1.25 in -// engine version 1.13. However, some clients are explicitly using downlevel -// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive. -// Hence also allowing 1.24 on Windows. 
-const MinVersion string = "1.24" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 7635b9f665..e55a76fc63 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.43" +basePath: "/v1.44" info: title: "Docker Engine API" - version: "1.43" + version: "1.44" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,8 +55,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.43) is used. - For example, calling `/info` is the same as calling `/v1.43/info`. Using the + If you omit the version-prefix, the current version of the API (v1.44) is used. + For example, calling `/info` is the same as calling `/v1.44/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -388,6 +388,16 @@ definitions: description: "Create mount point on host if missing" type: "boolean" default: false + ReadOnlyNonRecursive: + description: | + Make the mount non-recursively read-only, but still leave the mount recursive + (unless NonRecursive is set to true in conjunction). + type: "boolean" + default: false + ReadOnlyForceRecursive: + description: "Raise an error if the mount cannot be made recursively read-only." + type: "boolean" + default: false VolumeOptions: description: "Optional configuration for the `volume` type." type: "object" @@ -794,6 +804,12 @@ definitions: 1000000 (1 ms). 0 means inherit. type: "integer" format: "int64" + StartInterval: + description: | + The time to wait between checks in nanoseconds during the start period. + It should be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" Health: description: | @@ -1297,7 +1313,10 @@ definitions: type: "boolean" x-nullable: true MacAddress: - description: "MAC address of the container." + description: | + MAC address of the container. + + Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. type: "string" x-nullable: true OnBuild: @@ -1347,16 +1366,16 @@ definitions: EndpointsConfig: description: | A mapping of network name to endpoint configuration for that network. + The endpoint configuration can be left empty to connect to that + network with no particular endpoint configuration. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" example: # putting an example here, instead of using the example values from - # /definitions/EndpointSettings, because containers/create currently - # does not support attaching to multiple networks, so the example request - # would be confusing if it showed that multiple networks can be contained - # in the EndpointsConfig. - # TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) + # /definitions/EndpointSettings, because EndpointSettings contains + # operational data returned when inspecting a container that we don't + # accept here. 
EndpointsConfig: isolated_nw: IPAMConfig: @@ -1365,19 +1384,22 @@ definitions: LinkLocalIPs: - "169.254.34.68" - "fe80::3468" + MacAddress: "02:42:ac:12:05:02" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" + database_nw: {} NetworkSettings: description: "NetworkSettings exposes the network settings in the API" type: "object" properties: Bridge: - description: Name of the network's bridge (for example, `docker0`). + description: | + Name of the default bridge interface when dockerd's --bridge flag is set. type: "string" example: "docker0" SandboxID: @@ -1387,34 +1409,40 @@ definitions: HairpinMode: description: | Indicates if hairpin NAT should be enabled on the virtual interface. + + Deprecated: This field is never set and will be removed in a future release. type: "boolean" example: false LinkLocalIPv6Address: - description: IPv6 unicast address using the link-local prefix. + description: | + IPv6 unicast address using the link-local prefix. + + Deprecated: This field is never set and will be removed in a future release. type: "string" - example: "fe80::42:acff:fe11:1" + example: "" LinkLocalIPv6PrefixLen: - description: Prefix length of the IPv6 unicast address. + description: | + Prefix length of the IPv6 unicast address. + + Deprecated: This field is never set and will be removed in a future release. type: "integer" - example: "64" + example: "" Ports: $ref: "#/definitions/PortMap" SandboxKey: - description: SandboxKey identifies the sandbox + description: SandboxKey is the full path of the netns handle type: "string" example: "/var/run/docker/netns/8ab54b426c38" - # TODO is SecondaryIPAddresses actually used? SecondaryIPAddresses: - description: "" + description: "Deprecated: This field is never set and will be removed in a future release." type: "array" items: $ref: "#/definitions/Address" x-nullable: true - # TODO is SecondaryIPv6Addresses actually used? SecondaryIPv6Addresses: - description: "" + description: "Deprecated: This field is never set and will be removed in a future release." type: "array" items: $ref: "#/definitions/Address" @@ -1723,10 +1751,15 @@ definitions: The ID of the container that was used to create the image. Depending on how the image was created, this field may be empty. + + **Deprecated**: this field is kept for backward compatibility, but + will be removed in API v1.45. type: "string" - x-nullable: false example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" ContainerConfig: + description: | + **Deprecated**: this field is kept for backward compatibility, but + will be removed in API v1.45. $ref: "#/definitions/ContainerConfig" DockerVersion: description: | @@ -1781,13 +1814,7 @@ definitions: description: | Total size of the image including all layers it is composed of. - In versions of Docker before v1.10, this field was calculated from - the image itself and all of its parent images. Images are now stored - self-contained, and no longer use a parent-chain, making this field - an equivalent of the Size field. - - > **Deprecated**: this field is kept for backward compatibility, but - > will be removed in API v1.44. + Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. type: "integer" format: "int64" example: 1239828 @@ -1829,6 +1856,7 @@ definitions: x-nullable: true ImageSummary: type: "object" + x-go-name: "Summary" required: - Id - ParentId @@ -1925,12 +1953,7 @@ definitions: description: |- Total size of the image including all layers it is composed of. 
- In versions of Docker before v1.10, this field was calculated from - the image itself and all of its parent images. Images are now stored - self-contained, and no longer use a parent-chain, making this field - an equivalent of the Size field. - - Deprecated: this field is kept for backward compatibility, and will be removed in API v1.44. + Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. type: "integer" format: "int64" example: 172064416 @@ -2448,6 +2471,11 @@ definitions: example: - "container_1" - "container_2" + MacAddress: + description: | + MAC address for the endpoint on this network. The network driver might ignore this parameter. + type: "string" + example: "02:42:ac:11:00:04" Aliases: type: "array" items: @@ -2498,11 +2526,6 @@ definitions: type: "integer" format: "int64" example: 64 - MacAddress: - description: | - MAC address for the endpoint on this network. - type: "string" - example: "02:42:ac:11:00:04" DriverOpts: description: | DriverOpts is a mapping of driver options and values. These options @@ -2514,6 +2537,21 @@ definitions: example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" + DNSNames: + description: | + List of all DNS names an endpoint has on a specific network. This + list is based on the container name, network aliases, container short + ID, and hostname. + + These DNS names are non-fully qualified but can contain several dots. + You can get fully qualified DNS names by appending `.`. + For instance, if container name is `my.ctr` and the network is named + `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be + `my.ctr.testnet`. + type: array + items: + type: string + example: ["foobar", "server_x", "server_y", "my.ctr"] EndpointIPAMConfig: description: | @@ -3011,8 +3049,6 @@ definitions: Name: "journald" - Type: "Log" Name: "json-file" - - Type: "Log" - Name: "logentries" - Type: "Log" Name: "splunk" - Type: "Log" @@ -3547,6 +3583,32 @@ definitions: Level: type: "string" description: "SELinux level label" + Seccomp: + type: "object" + description: "Options for configuring seccomp on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "unconfined" + - "custom" + Profile: + description: "The custom seccomp profile as a json object" + type: "string" + AppArmor: + type: "object" + description: "Options for configuring AppArmor on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "disabled" + NoNewPrivileges: + type: "boolean" + description: "Configuration of the no_new_privs bit in the container" + TTY: description: "Whether a pseudo-TTY should be allocated." type: "boolean" @@ -3941,6 +4003,44 @@ definitions: - "remove" - "orphaned" + ContainerStatus: + type: "object" + description: "represents the status of a container." + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + + PortStatus: + type: "object" + description: "represents the port status of a task's host ports whose service has published host ports" + properties: + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + TaskStatus: + type: "object" + description: "represents the status of a task." 
+ properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + $ref: "#/definitions/ContainerStatus" + PortStatus: + $ref: "#/definitions/PortStatus" + Task: type: "object" properties: @@ -3976,26 +4076,7 @@ definitions: AssignedGenericResources: $ref: "#/definitions/GenericResources" Status: - type: "object" - properties: - Timestamp: - type: "string" - format: "dateTime" - State: - $ref: "#/definitions/TaskState" - Message: - type: "string" - Err: - type: "string" - ContainerStatus: - type: "object" - properties: - ContainerID: - type: "string" - PID: - type: "integer" - ExitCode: - type: "integer" + $ref: "#/definitions/TaskStatus" DesiredState: $ref: "#/definitions/TaskState" JobIteration: @@ -4211,7 +4292,10 @@ definitions: - "stop-first" - "start-first" Networks: - description: "Specifies which networks the service should attach to." + description: | + Specifies which networks the service should attach to. + + Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" @@ -4445,6 +4529,7 @@ definitions: ImageDeleteResponseItem: type: "object" + x-go-name: "DeleteResponse" properties: Untagged: description: "The image ID of an image that was untagged" @@ -4453,6 +4538,29 @@ definitions: description: "The image ID of an image that was deleted" type: "string" + ServiceCreateResponse: + type: "object" + description: | + contains the information returned to a client on the + creation of a new service. + properties: + ID: + description: "The ID of the created service." + type: "string" + x-nullable: false + example: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warnings: + description: | + Optional warning message. + + FIXME(thaJeztah): this should have "omitempty" in the generated type. + type: "array" + x-nullable: true + items: + type: "string" + example: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + ServiceUpdateResponse: type: "object" properties: @@ -4462,7 +4570,8 @@ definitions: items: type: "string" example: - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + Warnings: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" ContainerSummary: type: "object" @@ -5296,7 +5405,25 @@ definitions: - "WARNING: No memory limit support" - "WARNING: bridge-nf-call-iptables is disabled" - "WARNING: bridge-nf-call-ip6tables is disabled" + CDISpecDirs: + description: | + List of directories where (Container Device Interface) CDI + specifications are located. + These specifications define vendor-specific modifications to an OCI + runtime specification for a container being created. + + An empty list indicates that CDI device injection is disabled. + + Note that since using CDI device injection requires the daemon to have + experimental enabled. For non-experimental daemons an empty list will + always be returned. + type: "array" + items: + type: "string" + example: + - "/etc/cdi" + - "/var/run/cdi" # PluginsInfo is a temp struct holding Plugins name # registered with docker daemon. 
It is used by Info struct @@ -5334,7 +5461,7 @@ definitions: type: "array" items: type: "string" - example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"] RegistryServiceConfig: @@ -5532,6 +5659,28 @@ definitions: items: type: "string" example: ["--debug", "--systemd-cgroup=false"] + status: + description: | + Information specific to the runtime. + + While this API specification does not define data provided by runtimes, + the following well-known properties may be provided by runtimes: + + `org.opencontainers.runtime-spec.features`: features structure as defined + in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), + in a JSON string representation. + +


+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}" Commit: description: | @@ -6416,6 +6565,7 @@ paths: Aliases: - "server_x" - "server_y" + database_nw: {} required: true responses: @@ -6563,7 +6713,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" ExecIDs: - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" @@ -7994,6 +8144,7 @@ paths: - `label=key` or `label="key=value"` of an image label - `reference`=(`<image-name>[:<tag>]`) - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `until=<timestamp>` type: "string" - name: "shared-size" in: "query" @@ -8176,6 +8327,16 @@ paths: description: "BuildKit output configuration" type: "string" default: "" + - name: "version" + in: "query" + type: "string" + default: "1" + enum: ["1", "2"] + description: | + Version of the builder backend to use. + + - `1` is the first generation classic (deprecated) builder in the Docker daemon (default) + - `2` is [BuildKit](https://github.com/moby/buildkit) responses: 200: description: "no error" @@ -8245,7 +8406,7 @@ paths: /images/create: post: summary: "Create an image" - description: "Create an image by either pulling it from a registry or importing it." + description: "Pull or import an image." operationId: "ImageCreate" consumes: - "text/plain" @@ -8596,28 +8757,36 @@ paths: is_official: type: "boolean" is_automated: + description: | + Whether this repository has automated builds enabled. + +


+ + > **Deprecated**: This field is deprecated and will always + > be "false" in future. type: "boolean" + example: false name: type: "string" star_count: type: "integer" examples: application/json: - - description: "" - is_official: false + - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" + is_official: true is_automated: false - name: "wma55/u1210sshd" - star_count: 0 - - description: "" - is_official: false + name: "alpine" + star_count: 10093 + - description: "Busybox base image." + is_official: true is_automated: false - name: "jdswinbank/sshd" - star_count: 0 - - description: "" - is_official: false + name: "Busybox base image." + star_count: 3037 + - description: "The PostgreSQL object-relational database system provides reliability and data integrity." + is_official: true is_automated: false - name: "vgauthier/sshd" - star_count: 0 + name: "postgres" + star_count: 12408 500: description: "Server error" schema: @@ -8637,9 +8806,13 @@ paths: description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - - `is-automated=(true|false)` + - `is-automated=(true|false)` (deprecated, see below) - `is-official=(true|false)` - `stars=` Matches images that has at least 'number' stars. + + The `is-automated` filter is deprecated. The `is_automated` field has + been deprecated by Docker Hub's search API. Consequently, searching + for `is-automated=true` will yield no results. type: "string" tags: ["Image"] /images/prune: @@ -9032,7 +9205,6 @@ paths: Created: 1466724217 Size: 1092588 SharedSize: 0 - VirtualSize: 1092588 Labels: {} Containers: 1 Containers: @@ -9895,6 +10067,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: | Forbidden operation. This happens when trying to create a network named after a pre-defined network, @@ -9924,13 +10100,7 @@ paths: type: "string" CheckDuplicate: description: | - Check for networks with duplicate names. Since Network is - primarily keyed based on a random ID and not on the name, and - network name is strictly a user-friendly alias to the network - which is uniquely identified using ID, there is no guaranteed - way to check for duplicates. CheckDuplicate is there to provide - a best effort checking of any networks which has the same name - but it is not guaranteed to catch all name collisions. + Deprecated: CheckDuplicate is now always enabled. type: "boolean" Driver: description: "Name of the network driver plugin to use." @@ -9998,14 +10168,19 @@ paths: /networks/{id}/connect: post: summary: "Connect a container to a network" + description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. 
A network cannot be re-attached to a running container" operationId: "NetworkConnect" consumes: - "application/json" responses: 200: description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: - description: "Operation not supported for swarm scoped networks" + description: "Operation forbidden" schema: $ref: "#/definitions/ErrorResponse" 404: @@ -10040,6 +10215,7 @@ paths: IPAMConfig: IPv4Address: "172.24.56.89" IPv6Address: "2001:db8::5689" + MacAddress: "02:42:ac:12:05:02" tags: ["Network"] /networks/{id}/disconnect: @@ -11033,18 +11209,7 @@ paths: 201: description: "no error" schema: - type: "object" - title: "ServiceCreateResponse" - properties: - ID: - description: "The ID of the created service." - type: "string" - Warning: - description: "Optional warning message" - type: "string" - example: - ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + $ref: "#/definitions/ServiceCreateResponse" 400: description: "bad parameter" schema: diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go deleted file mode 100644 index 9ee329a2fb..0000000000 --- a/vendor/github.com/docker/docker/api/types/auth.go +++ /dev/null @@ -1,7 +0,0 @@ -package types // import "github.com/docker/docker/api/types" -import "github.com/docker/docker/api/types/registry" - -// AuthConfig contains authorization information for connecting to a Registry. -// -// Deprecated: use github.com/docker/docker/api/types/registry.AuthConfig -type AuthConfig = registry.AuthConfig diff --git a/vendor/github.com/docker/docker/api/types/backend/backend.go b/vendor/github.com/docker/docker/api/types/backend/backend.go index 8bdf36923a..613da5517d 100644 --- a/vendor/github.com/docker/docker/api/types/backend/backend.go +++ b/vendor/github.com/docker/docker/api/types/backend/backend.go @@ -5,10 +5,29 @@ import ( "io" "time" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + Platform *ocispec.Platform + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + // ContainerAttachConfig holds the streams to use when connecting to a container to view logs. type ContainerAttachConfig struct { GetStreams func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error) @@ -122,3 +141,25 @@ type CommitConfig struct { ContainerOS string ParentImageID string } + +// PluginRmConfig holds arguments for plugin remove. +type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. 
+type PluginDisableConfig struct { + ForceDisable bool +} + +// NetworkListConfig stores the options available for listing networks +type NetworkListConfig struct { + // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here + Detailed bool + Verbose bool +} diff --git a/vendor/github.com/docker/docker/api/types/checkpoint/list.go b/vendor/github.com/docker/docker/api/types/checkpoint/list.go new file mode 100644 index 0000000000..94a9c0a47d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/checkpoint/list.go @@ -0,0 +1,7 @@ +package checkpoint + +// Summary represents the details of a checkpoint when listing endpoints. +type Summary struct { + // Name is the name of the checkpoint. + Name string +} diff --git a/vendor/github.com/docker/docker/api/types/checkpoint/options.go b/vendor/github.com/docker/docker/api/types/checkpoint/options.go new file mode 100644 index 0000000000..9477458c24 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/checkpoint/options.go @@ -0,0 +1,19 @@ +package checkpoint + +// CreateOptions holds parameters to create a checkpoint from a container. +type CreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// ListOptions holds parameters to list checkpoints for a container. +type ListOptions struct { + CheckpointDir string +} + +// DeleteOptions holds parameters to delete a checkpoint from a container. +type DeleteOptions struct { + CheckpointID string + CheckpointDir string +} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index d8cd306135..24b00a2759 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -11,44 +11,6 @@ import ( units "github.com/docker/go-units" ) -// CheckpointCreateOptions holds parameters to create a checkpoint from a container -type CheckpointCreateOptions struct { - CheckpointID string - CheckpointDir string - Exit bool -} - -// CheckpointListOptions holds parameters to list checkpoints for a container -type CheckpointListOptions struct { - CheckpointDir string -} - -// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container -type CheckpointDeleteOptions struct { - CheckpointID string - CheckpointDir string -} - -// ContainerAttachOptions holds parameters to attach to a container. -type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string - Logs bool -} - -// ContainerCommitOptions holds parameters to commit changes into a container. -type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - // ContainerExecInspect holds information returned by exec inspect. type ContainerExecInspect struct { ExecID string `json:"ID"` @@ -58,42 +20,6 @@ type ContainerExecInspect struct { Pid int } -// ContainerListOptions holds parameters to list containers with. -type ContainerListOptions struct { - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filters filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Until string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. 
-type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// ContainerStartOptions holds parameters to start containers. -type ContainerStartOptions struct { - CheckpointID string - CheckpointDir string -} - // CopyToContainerOptions holds information // about files to copy into a container type CopyToContainerOptions struct { @@ -307,14 +233,6 @@ type ImageSearchOptions struct { Limit int } -// ResizeOptions holds parameters to resize a tty. -// It can be used to resize container ttys and -// exec process ttys too. -type ResizeOptions struct { - Height uint - Width uint -} - // NodeListOptions holds parameters to list nodes with. type NodeListOptions struct { Filters filters.Args @@ -340,15 +258,6 @@ type ServiceCreateOptions struct { QueryRegistry bool } -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -type ServiceCreateResponse struct { - // ID is the ID of the created service. - ID string - // Warnings is a set of non-fatal warning messages to pass on to the user. - Warnings []string `json:",omitempty"` -} - // Values for RegistryAuthFrom in ServiceUpdateOptions const ( RegistryAuthFromSpec = "spec" diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go index 7d5930bbeb..945b6efadd 100644 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -1,32 +1,5 @@ package types // import "github.com/docker/docker/api/types" -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - Platform *ocispec.Platform - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - // ExecConfig is a small subset of the Config struct that holds the configuration // for the exec feature of docker. type ExecConfig struct { @@ -43,25 +16,3 @@ type ExecConfig struct { WorkingDir string // Working directory Cmd []string // Execution commands and args } - -// PluginRmConfig holds arguments for plugin remove. -type PluginRmConfig struct { - ForceRemove bool -} - -// PluginEnableConfig holds arguments for plugin enable -type PluginEnableConfig struct { - Timeout int -} - -// PluginDisableConfig holds arguments for plugin disable. 
-type PluginDisableConfig struct { - ForceDisable bool -} - -// NetworkListConfig stores the options available for listing networks -type NetworkListConfig struct { - // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here - Detailed bool - Verbose bool -} diff --git a/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go b/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go deleted file mode 100644 index 6b4b47390d..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go +++ /dev/null @@ -1,6 +0,0 @@ -package container - -// ContainerChangeResponseItem change item in response to ContainerChanges operation -// -// Deprecated: use [FilesystemChange]. -type ContainerChangeResponseItem = FilesystemChange diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index 077583e66c..be41d6315e 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -5,6 +5,7 @@ import ( "time" "github.com/docker/docker/api/types/strslice" + dockerspec "github.com/docker/docker/image/spec/specs-go/v1" "github.com/docker/go-connections/nat" ) @@ -33,25 +34,7 @@ type StopOptions struct { } // HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} +type HealthConfig = dockerspec.HealthcheckConfig // ExecStartOptions holds the options to start container's exec. type ExecStartOptions struct { @@ -87,10 +70,13 @@ type Config struct { WorkingDir string // Current directory (PWD) in the command will be launched Entrypoint strslice.StrSlice // Entrypoint to run when starting the container NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT + // Mac Address of the container. + // + // Deprecated: this field is deprecated since API v1.44. Use EndpointSettings.MacAddress instead. 
+ MacAddress string `json:",omitempty"` + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT } diff --git a/vendor/github.com/docker/docker/api/types/container/errors.go b/vendor/github.com/docker/docker/api/types/container/errors.go new file mode 100644 index 0000000000..32c978037e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/errors.go @@ -0,0 +1,9 @@ +package container + +type errInvalidParameter struct{ error } + +func (e *errInvalidParameter) InvalidParameter() {} + +func (e *errInvalidParameter) Unwrap() error { + return e.error +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go index d4e6f55375..efb96266e8 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go @@ -1,10 +1,12 @@ package container // import "github.com/docker/docker/api/types/container" import ( + "fmt" "strings" "github.com/docker/docker/api/types/blkiodev" "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/strslice" "github.com/docker/go-connections/nat" units "github.com/docker/go-units" @@ -132,12 +134,12 @@ type NetworkMode string // IsNone indicates whether container isn't using a network stack. func (n NetworkMode) IsNone() bool { - return n == "none" + return n == network.NetworkNone } // IsDefault indicates whether container uses the default network stack. func (n NetworkMode) IsDefault() bool { - return n == "default" + return n == network.NetworkDefault } // IsPrivate indicates whether container uses its private network stack. @@ -271,33 +273,42 @@ type DeviceMapping struct { // RestartPolicy represents the restart policies of the container. type RestartPolicy struct { - Name string + Name RestartPolicyMode MaximumRetryCount int } +type RestartPolicyMode string + +const ( + RestartPolicyDisabled RestartPolicyMode = "no" + RestartPolicyAlways RestartPolicyMode = "always" + RestartPolicyOnFailure RestartPolicyMode = "on-failure" + RestartPolicyUnlessStopped RestartPolicyMode = "unless-stopped" +) + // IsNone indicates whether the container has the "no" restart policy. // This means the container will not automatically restart when exiting. func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" || rp.Name == "" + return rp.Name == RestartPolicyDisabled || rp.Name == "" } // IsAlways indicates whether the container has the "always" restart policy. // This means the container will automatically restart regardless of the exit status. func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == "always" + return rp.Name == RestartPolicyAlways } // IsOnFailure indicates whether the container has the "on-failure" restart policy. // This means the container will automatically restart of exiting with a non-zero exit status. func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == "on-failure" + return rp.Name == RestartPolicyOnFailure } // IsUnlessStopped indicates whether the container has the // "unless-stopped" restart policy. 
This means the container will // automatically restart unless user has put it to stopped state. func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == "unless-stopped" + return rp.Name == RestartPolicyUnlessStopped } // IsSame compares two RestartPolicy to see if they are the same @@ -305,6 +316,33 @@ func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount } +// ValidateRestartPolicy validates the given RestartPolicy. +func ValidateRestartPolicy(policy RestartPolicy) error { + switch policy.Name { + case RestartPolicyAlways, RestartPolicyUnlessStopped, RestartPolicyDisabled: + if policy.MaximumRetryCount != 0 { + msg := "invalid restart policy: maximum retry count can only be used with 'on-failure'" + if policy.MaximumRetryCount < 0 { + msg += " and cannot be negative" + } + return &errInvalidParameter{fmt.Errorf(msg)} + } + return nil + case RestartPolicyOnFailure: + if policy.MaximumRetryCount < 0 { + return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")} + } + return nil + case "": + // Versions before v25.0.0 created an empty restart-policy "name" as + // default. Allow an empty name with "any" MaximumRetryCount for + // backward-compatibility. + return nil + default: + return &errInvalidParameter{fmt.Errorf("invalid restart policy: unknown policy '%s'; use one of '%s', '%s', '%s', or '%s'", policy.Name, RestartPolicyDisabled, RestartPolicyAlways, RestartPolicyOnFailure, RestartPolicyUnlessStopped)} + } +} + // LogMode is a type to define the available modes for logging // These modes affect how logs are handled when log messages start piling up. type LogMode string diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go index 24c4fa8d90..4213292378 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -1,8 +1,9 @@ //go:build !windows -// +build !windows package container // import "github.com/docker/docker/api/types/container" +import "github.com/docker/docker/api/types/network" + // IsValid indicates if an isolation technology is valid func (i Isolation) IsValid() bool { return i.IsDefault() @@ -11,15 +12,15 @@ func (i Isolation) IsValid() bool { // NetworkName returns the name of the network stack. func (n NetworkMode) NetworkName() string { if n.IsBridge() { - return "bridge" + return network.NetworkBridge } else if n.IsHost() { - return "host" + return network.NetworkHost } else if n.IsContainer() { return "container" } else if n.IsNone() { - return "none" + return network.NetworkNone } else if n.IsDefault() { - return "default" + return network.NetworkDefault } else if n.IsUserDefined() { return n.UserDefined() } @@ -28,12 +29,12 @@ func (n NetworkMode) NetworkName() string { // IsBridge indicates whether container uses the bridge network stack func (n NetworkMode) IsBridge() bool { - return n == "bridge" + return n == network.NetworkBridge } // IsHost indicates whether container uses the host network stack. 
func (n NetworkMode) IsHost() bool { - return n == "host" + return n == network.NetworkHost } // IsUserDefined indicates user-created network diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go index 99f803a5bb..154667f4f0 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -1,9 +1,11 @@ package container // import "github.com/docker/docker/api/types/container" +import "github.com/docker/docker/api/types/network" + // IsBridge indicates whether container uses the bridge network stack // in windows it is given the name NAT func (n NetworkMode) IsBridge() bool { - return n == "nat" + return n == network.NetworkNat } // IsHost indicates whether container uses the host network stack. @@ -25,11 +27,11 @@ func (i Isolation) IsValid() bool { // NetworkName returns the name of the network stack. func (n NetworkMode) NetworkName() string { if n.IsDefault() { - return "default" + return network.NetworkDefault } else if n.IsBridge() { - return "nat" + return network.NetworkNat } else if n.IsNone() { - return "none" + return network.NetworkNone } else if n.IsContainer() { return "container" } else if n.IsUserDefined() { diff --git a/vendor/github.com/docker/docker/api/types/container/options.go b/vendor/github.com/docker/docker/api/types/container/options.go new file mode 100644 index 0000000000..7a23005769 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/options.go @@ -0,0 +1,67 @@ +package container + +import "github.com/docker/docker/api/types/filters" + +// ResizeOptions holds parameters to resize a TTY. +// It can be used to resize container TTYs and +// exec process TTYs too. +type ResizeOptions struct { + Height uint + Width uint +} + +// AttachOptions holds parameters to attach to a container. +type AttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// CommitOptions holds parameters to commit changes into a container. +type CommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *Config +} + +// RemoveOptions holds parameters to remove containers. +type RemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// StartOptions holds parameters to start containers. +type StartOptions struct { + CheckpointID string + CheckpointDir string +} + +// ListOptions holds parameters to list containers with. +type ListOptions struct { + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// LogsOptions holds parameters to filter logs with. +type LogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go index 9fe07e26fd..6dbcd92235 100644 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -1,7 +1,7 @@ package events // import "github.com/docker/docker/api/types/events" // Type is used for event-types. -type Type = string +type Type string // List of known event types. 
const ( @@ -18,6 +18,86 @@ VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate. ) +// Action is used for event-actions. +type Action string + +const ( + ActionCreate Action = "create" + ActionStart Action = "start" + ActionRestart Action = "restart" + ActionStop Action = "stop" + ActionCheckpoint Action = "checkpoint" + ActionPause Action = "pause" + ActionUnPause Action = "unpause" + ActionAttach Action = "attach" + ActionDetach Action = "detach" + ActionResize Action = "resize" + ActionUpdate Action = "update" + ActionRename Action = "rename" + ActionKill Action = "kill" + ActionDie Action = "die" + ActionOOM Action = "oom" + ActionDestroy Action = "destroy" + ActionRemove Action = "remove" + ActionCommit Action = "commit" + ActionTop Action = "top" + ActionCopy Action = "copy" + ActionArchivePath Action = "archive-path" + ActionExtractToDir Action = "extract-to-dir" + ActionExport Action = "export" + ActionImport Action = "import" + ActionSave Action = "save" + ActionLoad Action = "load" + ActionTag Action = "tag" + ActionUnTag Action = "untag" + ActionPush Action = "push" + ActionPull Action = "pull" + ActionPrune Action = "prune" + ActionDelete Action = "delete" + ActionEnable Action = "enable" + ActionDisable Action = "disable" + ActionConnect Action = "connect" + ActionDisconnect Action = "disconnect" + ActionReload Action = "reload" + ActionMount Action = "mount" + ActionUnmount Action = "unmount" + + // ActionExecCreate is the prefix used for exec_create events. These + // event-actions are commonly followed by a colon and space (": "), + // and the command that's defined for the exec, for example: + // + // exec_create: /bin/sh -c 'echo hello' + // + // This is far from ideal; it's a compromise to allow filtering and + // to preserve backward-compatibility. + ActionExecCreate Action = "exec_create" + // ActionExecStart is the prefix used for exec_start events. These + // event-actions are commonly followed by a colon and space (": "), + // and the command that's defined for the exec, for example: + // + // exec_start: /bin/sh -c 'echo hello' + // + // This is far from ideal; it's a compromise to allow filtering and + // to preserve backward-compatibility. + ActionExecStart Action = "exec_start" + ActionExecDie Action = "exec_die" + ActionExecDetach Action = "exec_detach" + + // ActionHealthStatus is the prefix to use for health_status events. + // + // Health-status events can either have a pre-defined status, in which + // case the "health_status" action is followed by a colon, or can be + // "free-form", in which case they're followed by the output of the + // health-check output. + // + // This is far from ideal, and a compromise to allow filtering, and + // to preserve backward-compatibility. + ActionHealthStatus Action = "health_status" + ActionHealthStatusRunning Action = "health_status: running" + ActionHealthStatusHealthy Action = "health_status: healthy" + ActionHealthStatusUnhealthy Action = "health_status: unhealthy" +) + // Actor describes something that generates events, // like a container, or a network, or a volume. // It has a defined name and a set of attributes. @@ -37,7 +117,7 @@ type Message struct { From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead. Type Type - Action string + Action Action Actor Actor // Engine events are local scope. Cluster events are swarm scope.
Scope string `json:"scope,omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image/delete_response.go similarity index 68% rename from vendor/github.com/docker/docker/api/types/image_delete_response_item.go rename to vendor/github.com/docker/docker/api/types/image/delete_response.go index b9a65a0d8e..998620dc6a 100644 --- a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go +++ b/vendor/github.com/docker/docker/api/types/image/delete_response.go @@ -1,11 +1,11 @@ -package types +package image // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// ImageDeleteResponseItem image delete response item -// swagger:model ImageDeleteResponseItem -type ImageDeleteResponseItem struct { +// DeleteResponse delete response +// swagger:model DeleteResponse +type DeleteResponse struct { // The image ID of an image that was deleted Deleted string `json:"Deleted,omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/image/image.go b/vendor/github.com/docker/docker/api/types/image/image.go new file mode 100644 index 0000000000..167df28c7b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/image.go @@ -0,0 +1,9 @@ +package image + +import "time" + +// Metadata contains engine-local data about the image. +type Metadata struct { + // LastTagTime is the date and time at which the image was last tagged. + LastTagTime time.Time `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image/summary.go similarity index 85% rename from vendor/github.com/docker/docker/api/types/image_summary.go rename to vendor/github.com/docker/docker/api/types/image/summary.go index 0f6f144840..f1e3e2ef01 100644 --- a/vendor/github.com/docker/docker/api/types/image_summary.go +++ b/vendor/github.com/docker/docker/api/types/image/summary.go @@ -1,11 +1,11 @@ -package types +package image // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// ImageSummary image summary -// swagger:model ImageSummary -type ImageSummary struct { +// Summary summary +// swagger:model Summary +type Summary struct { // Number of containers using this image. Includes both stopped and running // containers. @@ -84,11 +84,6 @@ type ImageSummary struct { // Total size of the image including all layers it is composed of. // - // In versions of Docker before v1.10, this field was calculated from - // the image itself and all of its parent images. Images are now stored - // self-contained, and no longer use a parent-chain, making this field - // an equivalent of the Size field. - // - // Deprecated: this field is kept for backward compatibility, and will be removed in API v1.44. + // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. 
VirtualSize int64 `json:"VirtualSize,omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index ac4ce62231..57edf2ef18 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -29,7 +29,7 @@ type Mount struct { // Source is not supported for tmpfs (must be an empty value) Source string `json:",omitempty"` Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` // attempts recursive read-only if possible Consistency Consistency `json:",omitempty"` BindOptions *BindOptions `json:",omitempty"` @@ -85,6 +85,11 @@ type BindOptions struct { Propagation Propagation `json:",omitempty"` NonRecursive bool `json:",omitempty"` CreateMountpoint bool `json:",omitempty"` + // ReadOnlyNonRecursive makes the mount non-recursively read-only, but still leaves the mount recursive + // (unless NonRecursive is set to true in conjunction). + ReadOnlyNonRecursive bool `json:",omitempty"` + // ReadOnlyForceRecursive raises an error if the mount cannot be made recursively read-only. + ReadOnlyForceRecursive bool `json:",omitempty"` } // VolumeOptions represents the options for a mount of type volume. diff --git a/vendor/github.com/docker/docker/api/types/network/endpoint.go b/vendor/github.com/docker/docker/api/types/network/endpoint.go new file mode 100644 index 0000000000..9edd1c38d9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/endpoint.go @@ -0,0 +1,147 @@ +package network + +import ( + "errors" + "fmt" + "net" + + "github.com/docker/docker/internal/multierror" +) + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string // Aliases holds the list of extra, user-specified DNS names for this endpoint. + // MacAddress may be used to specify a MAC address when the container is created. + // Once the container is running, it becomes operational data (it may contain a + // generated address). + MacAddress string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + DriverOpts map[string]string + // DNSNames holds all the (non fully qualified) DNS names associated to this endpoint. First entry is used to + // generate PTR records. + DNSNames []string +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + epCopy := *es + if es.IPAMConfig != nil { + epCopy.IPAMConfig = es.IPAMConfig.Copy() + } + + if es.Links != nil { + links := make([]string, 0, len(es.Links)) + epCopy.Links = append(links, es.Links...) + } + + if es.Aliases != nil { + aliases := make([]string, 0, len(es.Aliases)) + epCopy.Aliases = append(aliases, es.Aliases...) 
+ } + + if len(es.DNSNames) > 0 { + epCopy.DNSNames = make([]string, len(es.DNSNames)) + copy(epCopy.DNSNames, es.DNSNames) + } + + return &epCopy +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) + cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) + return &cfgCopy +} + +// NetworkSubnet describes a user-defined subnet for a specific network. It's only used to validate if an +// EndpointIPAMConfig is valid for a specific network. +type NetworkSubnet interface { + // Contains checks whether the NetworkSubnet contains [addr]. + Contains(addr net.IP) bool + // IsStatic checks whether the subnet was statically allocated (ie. user-defined). + IsStatic() bool +} + +// IsInRange checks whether static IP addresses are valid in a specific network. +func (cfg *EndpointIPAMConfig) IsInRange(v4Subnets []NetworkSubnet, v6Subnets []NetworkSubnet) error { + var errs []error + + if err := validateEndpointIPAddress(cfg.IPv4Address, v4Subnets); err != nil { + errs = append(errs, err) + } + if err := validateEndpointIPAddress(cfg.IPv6Address, v6Subnets); err != nil { + errs = append(errs, err) + } + + return multierror.Join(errs...) +} + +func validateEndpointIPAddress(epAddr string, ipamSubnets []NetworkSubnet) error { + if epAddr == "" { + return nil + } + + var staticSubnet bool + parsedAddr := net.ParseIP(epAddr) + for _, subnet := range ipamSubnets { + if subnet.IsStatic() { + staticSubnet = true + if subnet.Contains(parsedAddr) { + return nil + } + } + } + + if staticSubnet { + return fmt.Errorf("no configured subnet or ip-range contain the IP address %s", epAddr) + } + + return errors.New("user specified IP address is supported only when connecting to networks with user configured subnets") +} + +// Validate checks whether cfg is valid. +func (cfg *EndpointIPAMConfig) Validate() error { + if cfg == nil { + return nil + } + + var errs []error + + if cfg.IPv4Address != "" { + if addr := net.ParseIP(cfg.IPv4Address); addr == nil || addr.To4() == nil || addr.IsUnspecified() { + errs = append(errs, fmt.Errorf("invalid IPv4 address: %s", cfg.IPv4Address)) + } + } + if cfg.IPv6Address != "" { + if addr := net.ParseIP(cfg.IPv6Address); addr == nil || addr.To4() != nil || addr.IsUnspecified() { + errs = append(errs, fmt.Errorf("invalid IPv6 address: %s", cfg.IPv6Address)) + } + } + for _, addr := range cfg.LinkLocalIPs { + if parsed := net.ParseIP(addr); parsed == nil || parsed.IsUnspecified() { + errs = append(errs, fmt.Errorf("invalid link-local IP address: %s", addr)) + } + } + + return multierror.Join(errs...) 
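EndpointIPAMConfig.Validate above only checks address syntax and family; IsInRange additionally needs the network's subnets through the NetworkSubnet interface. A minimal sketch of the syntax-level check, with an illustrative address:

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/network"
    )

    // checkEndpointConfig validates a static IPv4 address before it is handed
    // to the daemon; subnet membership would additionally require IsInRange.
    func checkEndpointConfig() error {
        cfg := &network.EndpointIPAMConfig{IPv4Address: "172.20.0.10"}
        if err := cfg.Validate(); err != nil {
            return fmt.Errorf("bad endpoint IPAM config: %w", err)
        }
        return nil
    }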
+} diff --git a/vendor/github.com/docker/docker/api/types/network/ipam.go b/vendor/github.com/docker/docker/api/types/network/ipam.go new file mode 100644 index 0000000000..f319e1402b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/ipam.go @@ -0,0 +1,134 @@ +package network + +import ( + "errors" + "fmt" + "net/netip" + + "github.com/docker/docker/internal/multierror" +) + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string // Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +type ipFamily string + +const ( + ip4 ipFamily = "IPv4" + ip6 ipFamily = "IPv6" +) + +// ValidateIPAM checks whether the network's IPAM passed as argument is valid. It returns a joinError of the list of +// errors found. +func ValidateIPAM(ipam *IPAM, enableIPv6 bool) error { + if ipam == nil { + return nil + } + + var errs []error + for _, cfg := range ipam.Config { + subnet, err := netip.ParsePrefix(cfg.Subnet) + if err != nil { + errs = append(errs, fmt.Errorf("invalid subnet %s: invalid CIDR block notation", cfg.Subnet)) + continue + } + subnetFamily := ip4 + if subnet.Addr().Is6() { + subnetFamily = ip6 + } + + if !enableIPv6 && subnetFamily == ip6 { + continue + } + + if subnet != subnet.Masked() { + errs = append(errs, fmt.Errorf("invalid subnet %s: it should be %s", subnet, subnet.Masked())) + } + + if ipRangeErrs := validateIPRange(cfg.IPRange, subnet, subnetFamily); len(ipRangeErrs) > 0 { + errs = append(errs, ipRangeErrs...) + } + + if err := validateAddress(cfg.Gateway, subnet, subnetFamily); err != nil { + errs = append(errs, fmt.Errorf("invalid gateway %s: %w", cfg.Gateway, err)) + } + + for auxName, aux := range cfg.AuxAddress { + if err := validateAddress(aux, subnet, subnetFamily); err != nil { + errs = append(errs, fmt.Errorf("invalid auxiliary address %s: %w", auxName, err)) + } + } + } + + if err := multierror.Join(errs...); err != nil { + return fmt.Errorf("invalid network config:\n%w", err) + } + + return nil +} + +func validateIPRange(ipRange string, subnet netip.Prefix, subnetFamily ipFamily) []error { + if ipRange == "" { + return nil + } + prefix, err := netip.ParsePrefix(ipRange) + if err != nil { + return []error{fmt.Errorf("invalid ip-range %s: invalid CIDR block notation", ipRange)} + } + family := ip4 + if prefix.Addr().Is6() { + family = ip6 + } + + if family != subnetFamily { + return []error{fmt.Errorf("invalid ip-range %s: parent subnet is an %s block", ipRange, subnetFamily)} + } + + var errs []error + if prefix.Bits() < subnet.Bits() { + errs = append(errs, fmt.Errorf("invalid ip-range %s: CIDR block is bigger than its parent subnet %s", ipRange, subnet)) + } + if prefix != prefix.Masked() { + errs = append(errs, fmt.Errorf("invalid ip-range %s: it should be %s", prefix, prefix.Masked())) + } + if !subnet.Overlaps(prefix) { + errs = append(errs, fmt.Errorf("invalid ip-range %s: parent subnet %s doesn't contain ip-range", ipRange, subnet)) + } + + return errs +} + +func validateAddress(address string, subnet netip.Prefix, subnetFamily ipFamily) error { + if address == "" { + return nil + } + addr, err := netip.ParseAddr(address) + if err != nil { + return errors.New("invalid address") + } + family := ip4 + if addr.Is6() { + family = ip6 + } + + if 
family != subnetFamily { + return fmt.Errorf("parent subnet is an %s block", subnetFamily) + } + if !subnet.Contains(addr) { + return fmt.Errorf("parent subnet %s doesn't contain this address", subnet) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go index 437b184c67..f1f300f3d7 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -1,69 +1,34 @@ package network // import "github.com/docker/docker/api/types/network" + import ( "github.com/docker/docker/api/types/filters" ) +const ( + // NetworkDefault is a platform-independent alias to choose the platform-specific default network stack. + NetworkDefault = "default" + // NetworkHost is the name of the predefined network used when the NetworkMode host is selected (only available on Linux) + NetworkHost = "host" + // NetworkNone is the name of the predefined network used when the NetworkMode none is selected (available on both Linux and Windows) + NetworkNone = "none" + // NetworkBridge is the name of the default network on Linux + NetworkBridge = "bridge" + // NetworkNat is the name of the default network on Windows + NetworkNat = "nat" +) + // Address represents an IP address type Address struct { Addr string PrefixLen int } -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string // Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// Copy makes a copy of the endpoint ipam config -func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { - cfgCopy := *cfg - cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) - cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) - return &cfgCopy -} - // PeerInfo represents one peer of an overlay network type PeerInfo struct { Name string IP string } -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string - DriverOpts map[string]string -} - // Task carries the information about one backend task type Task struct { Name string @@ -80,25 +45,6 @@ type ServiceInfo struct { Tasks []Task } -// Copy makes a deep copy of `EndpointSettings` -func (es *EndpointSettings) Copy() *EndpointSettings { - epCopy := *es - if es.IPAMConfig != nil { - epCopy.IPAMConfig = es.IPAMConfig.Copy() - } - - if es.Links != nil { - links := make([]string, 0, len(es.Links)) - epCopy.Links = append(links, es.Links...) - } - - if es.Aliases != nil { - aliases := make([]string, 0, len(es.Aliases)) - epCopy.Aliases = append(aliases, es.Aliases...) 
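The predefined network-name constants introduced above can replace hard-coded strings such as "host" or "bridge" in callers. An illustrative sketch, converting through the existing container.NetworkMode string type; the helper name is a placeholder:

    package main

    import (
        "github.com/docker/docker/api/types/container"
        "github.com/docker/docker/api/types/network"
    )

    // hostNetworked selects the predefined host network without a string literal.
    func hostNetworked() *container.HostConfig {
        return &container.HostConfig{
            NetworkMode: container.NetworkMode(network.NetworkHost),
        }
    }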
- } - return &epCopy -} - // NetworkingConfig represents the container's networking configuration for each of its interfaces // Carries the networking configs specified in the `docker run` and `docker network connect` commands type NetworkingConfig struct { diff --git a/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go b/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go index 5ced16895c..167d1d5f88 100644 --- a/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go +++ b/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go @@ -1,24 +1,15 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: entry.proto -// DO NOT EDIT! -/* - Package logdriver is a generated protocol buffer package. - - It is generated from these files: - entry.proto - - It has these top-level messages: - LogEntry - PartialLogEntryMetadata -*/ package logdriver -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -29,20 +20,48 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type LogEntry struct { Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` TimeNano int64 `protobuf:"varint,2,opt,name=time_nano,json=timeNano,proto3" json:"time_nano,omitempty"` Line []byte `protobuf:"bytes,3,opt,name=line,proto3" json:"line,omitempty"` Partial bool `protobuf:"varint,4,opt,name=partial,proto3" json:"partial,omitempty"` - PartialLogMetadata *PartialLogEntryMetadata `protobuf:"bytes,5,opt,name=partial_log_metadata,json=partialLogMetadata" json:"partial_log_metadata,omitempty"` + PartialLogMetadata *PartialLogEntryMetadata `protobuf:"bytes,5,opt,name=partial_log_metadata,json=partialLogMetadata,proto3" json:"partial_log_metadata,omitempty"` +} + +func (m *LogEntry) Reset() { *m = LogEntry{} } +func (m *LogEntry) String() string { return proto.CompactTextString(m) } +func (*LogEntry) ProtoMessage() {} +func (*LogEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_daa6c5b6c627940f, []int{0} +} +func (m *LogEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LogEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LogEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogEntry.Merge(m, src) +} +func (m *LogEntry) XXX_Size() int { + return m.Size() +} +func (m *LogEntry) XXX_DiscardUnknown() { + xxx_messageInfo_LogEntry.DiscardUnknown(m) } -func (m *LogEntry) Reset() { *m = LogEntry{} } -func (m *LogEntry) String() string { return proto.CompactTextString(m) } -func (*LogEntry) ProtoMessage() {} -func (*LogEntry) Descriptor() ([]byte, []int) { return fileDescriptorEntry, []int{0} } +var xxx_messageInfo_LogEntry proto.InternalMessageInfo func (m *LogEntry) 
GetSource() string { if m != nil { @@ -85,10 +104,38 @@ type PartialLogEntryMetadata struct { Ordinal int32 `protobuf:"varint,3,opt,name=ordinal,proto3" json:"ordinal,omitempty"` } -func (m *PartialLogEntryMetadata) Reset() { *m = PartialLogEntryMetadata{} } -func (m *PartialLogEntryMetadata) String() string { return proto.CompactTextString(m) } -func (*PartialLogEntryMetadata) ProtoMessage() {} -func (*PartialLogEntryMetadata) Descriptor() ([]byte, []int) { return fileDescriptorEntry, []int{1} } +func (m *PartialLogEntryMetadata) Reset() { *m = PartialLogEntryMetadata{} } +func (m *PartialLogEntryMetadata) String() string { return proto.CompactTextString(m) } +func (*PartialLogEntryMetadata) ProtoMessage() {} +func (*PartialLogEntryMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_daa6c5b6c627940f, []int{1} +} +func (m *PartialLogEntryMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialLogEntryMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PartialLogEntryMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PartialLogEntryMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialLogEntryMetadata.Merge(m, src) +} +func (m *PartialLogEntryMetadata) XXX_Size() int { + return m.Size() +} +func (m *PartialLogEntryMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_PartialLogEntryMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_PartialLogEntryMetadata proto.InternalMessageInfo func (m *PartialLogEntryMetadata) GetLast() bool { if m != nil { @@ -115,10 +162,33 @@ func init() { proto.RegisterType((*LogEntry)(nil), "LogEntry") proto.RegisterType((*PartialLogEntryMetadata)(nil), "PartialLogEntryMetadata") } + +func init() { proto.RegisterFile("entry.proto", fileDescriptor_daa6c5b6c627940f) } + +var fileDescriptor_daa6c5b6c627940f = []byte{ + // 250 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0xbd, 0x4a, 0x04, 0x31, + 0x14, 0x85, 0xe7, 0xce, 0xfe, 0x38, 0x73, 0x57, 0x2c, 0x82, 0x68, 0x40, 0x08, 0x61, 0xab, 0x54, + 0x5b, 0xe8, 0x1b, 0x08, 0x36, 0xa2, 0x22, 0x69, 0x2c, 0x87, 0xab, 0x13, 0x96, 0xc0, 0x6c, 0x32, + 0x64, 0x63, 0xe1, 0x5b, 0xf8, 0x3a, 0xbe, 0x81, 0xe5, 0x96, 0x96, 0x32, 0xf3, 0x22, 0x32, 0x71, + 0xc6, 0xce, 0xee, 0x9c, 0xf3, 0xa5, 0xf8, 0x72, 0x71, 0x65, 0x5c, 0x0c, 0x6f, 0x9b, 0x36, 0xf8, + 0xe8, 0xd7, 0x1f, 0x80, 0xc5, 0x9d, 0xdf, 0xde, 0x0c, 0x13, 0x3b, 0xc3, 0xe5, 0xde, 0xbf, 0x86, + 0x17, 0xc3, 0x41, 0x82, 0x2a, 0xf5, 0xd8, 0xd8, 0x05, 0x96, 0xd1, 0xee, 0x4c, 0xe5, 0xc8, 0x79, + 0x9e, 0x4b, 0x50, 0x33, 0x5d, 0x0c, 0xc3, 0x03, 0x39, 0xcf, 0x18, 0xce, 0x1b, 0xeb, 0x0c, 0x9f, + 0x49, 0x50, 0xc7, 0x3a, 0x65, 0xc6, 0xf1, 0xa8, 0xa5, 0x10, 0x2d, 0x35, 0x7c, 0x2e, 0x41, 0x15, + 0x7a, 0xaa, 0xec, 0x16, 0x4f, 0xc7, 0x58, 0x35, 0x7e, 0x5b, 0xed, 0x4c, 0xa4, 0x9a, 0x22, 0xf1, + 0x85, 0x04, 0xb5, 0xba, 0xe4, 0x9b, 0xc7, 0x5f, 0x38, 0x29, 0xdd, 0x8f, 0x5c, 0xb3, 0xf6, 0x0f, + 0x4c, 0xdb, 0xfa, 0x09, 0xcf, 0xff, 0x79, 0x9e, 0xa4, 0x68, 0x1f, 0xd3, 0x3f, 0x0a, 0x9d, 0x32, + 0x3b, 0xc1, 0xdc, 0xd6, 0x49, 0xbf, 0xd4, 0xb9, 0xad, 0x07, 0x49, 0x1f, 0x6a, 0xeb, 0xa8, 0x49, + 0xee, 0x0b, 0x3d, 0xd5, 0x6b, 0xfe, 0xd9, 0x09, 0x38, 0x74, 0x02, 0xbe, 0x3b, 0x01, 0xef, 0xbd, + 0xc8, 0x0e, 0xbd, 0xc8, 0xbe, 0x7a, 0x91, 0x3d, 0x2f, 0xd3, 0xd5, 0xae, 0x7e, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xbb, 
0x82, 0x62, 0xd5, 0x44, 0x01, 0x00, 0x00, +} + func (m *LogEntry) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -126,54 +196,63 @@ func (m *LogEntry) Marshal() (dAtA []byte, err error) { } func (m *LogEntry) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LogEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Source) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintEntry(dAtA, i, uint64(len(m.Source))) - i += copy(dAtA[i:], m.Source) - } - if m.TimeNano != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintEntry(dAtA, i, uint64(m.TimeNano)) - } - if len(m.Line) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintEntry(dAtA, i, uint64(len(m.Line))) - i += copy(dAtA[i:], m.Line) + if m.PartialLogMetadata != nil { + { + size, err := m.PartialLogMetadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEntry(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } if m.Partial { - dAtA[i] = 0x20 - i++ + i-- if m.Partial { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x20 } - if m.PartialLogMetadata != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintEntry(dAtA, i, uint64(m.PartialLogMetadata.Size())) - n1, err := m.PartialLogMetadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 + if len(m.Line) > 0 { + i -= len(m.Line) + copy(dAtA[i:], m.Line) + i = encodeVarintEntry(dAtA, i, uint64(len(m.Line))) + i-- + dAtA[i] = 0x1a + } + if m.TimeNano != 0 { + i = encodeVarintEntry(dAtA, i, uint64(m.TimeNano)) + i-- + dAtA[i] = 0x10 + } + if len(m.Source) > 0 { + i -= len(m.Source) + copy(dAtA[i:], m.Source) + i = encodeVarintEntry(dAtA, i, uint64(len(m.Source))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *PartialLogEntryMetadata) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -181,62 +260,55 @@ func (m *PartialLogEntryMetadata) Marshal() (dAtA []byte, err error) { } func (m *PartialLogEntryMetadata) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialLogEntryMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if m.Ordinal != 0 { + i = encodeVarintEntry(dAtA, i, uint64(m.Ordinal)) + i-- + dAtA[i] = 0x18 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintEntry(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0x12 + } if m.Last { - dAtA[i] = 0x8 - i++ + i-- if m.Last { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if len(m.Id) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintEntry(dAtA, i, uint64(len(m.Id))) - i += copy(dAtA[i:], m.Id) - } - if m.Ordinal != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintEntry(dAtA, i, uint64(m.Ordinal)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } -func encodeFixed64Entry(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - 
return offset + 8 -} -func encodeFixed32Entry(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintEntry(dAtA []byte, offset int, v uint64) int { + offset -= sovEntry(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *LogEntry) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Source) @@ -261,6 +333,9 @@ func (m *LogEntry) Size() (n int) { } func (m *PartialLogEntryMetadata) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Last { @@ -277,14 +352,7 @@ func (m *PartialLogEntryMetadata) Size() (n int) { } func sovEntry(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozEntry(x uint64) (n int) { return sovEntry(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -304,7 +372,7 @@ func (m *LogEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -332,7 +400,7 @@ func (m *LogEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -342,6 +410,9 @@ func (m *LogEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthEntry } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEntry + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -361,7 +432,7 @@ func (m *LogEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeNano |= (int64(b) & 0x7F) << shift + m.TimeNano |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -380,7 +451,7 @@ func (m *LogEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -389,6 +460,9 @@ func (m *LogEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthEntry } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthEntry + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -411,7 +485,7 @@ func (m *LogEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -431,7 +505,7 @@ func (m *LogEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -440,6 +514,9 @@ func (m *LogEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthEntry } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEntry + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -456,7 +533,7 @@ func (m *LogEntry) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEntry } if (iNdEx + skippy) > l { @@ -486,7 +563,7 @@ func (m *PartialLogEntryMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -514,7 +591,7 @@ func (m *PartialLogEntryMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -534,7 +611,7 @@ func (m 
*PartialLogEntryMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -544,6 +621,9 @@ func (m *PartialLogEntryMetadata) Unmarshal(dAtA []byte) error { return ErrInvalidLengthEntry } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEntry + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -563,7 +643,7 @@ func (m *PartialLogEntryMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Ordinal |= (int32(b) & 0x7F) << shift + m.Ordinal |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -574,7 +654,7 @@ func (m *PartialLogEntryMetadata) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthEntry } if (iNdEx + skippy) > l { @@ -592,6 +672,7 @@ func (m *PartialLogEntryMetadata) Unmarshal(dAtA []byte) error { func skipEntry(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -623,10 +704,8 @@ func skipEntry(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -643,74 +722,34 @@ func skipEntry(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthEntry } - return iNdEx, nil + iNdEx += length case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEntry - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipEntry(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEntry + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthEntry + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthEntry = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEntry = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthEntry = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEntry = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEntry = fmt.Errorf("proto: unexpected end of group") ) - -func init() { proto.RegisterFile("entry.proto", fileDescriptorEntry) } - -var fileDescriptorEntry = []byte{ - // 237 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x90, 0xbd, 0x4a, 0x04, 0x31, - 0x14, 0x85, 0xb9, 0xb3, 0x3f, 0xce, 0xdc, 0x5d, 0x2c, 0x82, 0x68, 0x40, 0x18, 0xc2, 0x56, 0xa9, - 0xb6, 0xd0, 0x37, 0x10, 0x6c, 0x44, 0x45, 0xd2, 0x58, 0x0e, 0x57, 0x27, 0x2c, 0x81, 0xd9, 0xdc, - 0x21, 0x13, 0x0b, 0x1f, 0xcd, 0x37, 0xb0, 0xf4, 0x11, 0x64, 0x9e, 0x44, 0x26, 0x4e, 0xec, 0xec, - 0xce, 0x39, 0x5f, 0x8a, 0x2f, 0x17, 0x37, 0xd6, 0xc7, 0xf0, 0xbe, 0xef, 0x03, 0x47, 0xde, 0x7d, - 0x00, 0x96, 0xf7, 0x7c, 0xb8, 0x9d, 0x26, 0x71, 0x8e, 0xeb, 0x81, 0xdf, 0xc2, 0xab, 0x95, 0xa0, - 0x40, 
0x57, 0x66, 0x6e, 0xe2, 0x12, 0xab, 0xe8, 0x8e, 0xb6, 0xf1, 0xe4, 0x59, 0x16, 0x0a, 0xf4, - 0xc2, 0x94, 0xd3, 0xf0, 0x48, 0x9e, 0x85, 0xc0, 0x65, 0xe7, 0xbc, 0x95, 0x0b, 0x05, 0x7a, 0x6b, - 0x52, 0x16, 0x12, 0x4f, 0x7a, 0x0a, 0xd1, 0x51, 0x27, 0x97, 0x0a, 0x74, 0x69, 0x72, 0x15, 0x77, - 0x78, 0x36, 0xc7, 0xa6, 0xe3, 0x43, 0x73, 0xb4, 0x91, 0x5a, 0x8a, 0x24, 0x57, 0x0a, 0xf4, 0xe6, - 0x4a, 0xee, 0x9f, 0x7e, 0x61, 0x56, 0x7a, 0x98, 0xb9, 0x11, 0xfd, 0x1f, 0xc8, 0xdb, 0xee, 0x19, - 0x2f, 0xfe, 0x79, 0x9e, 0xa4, 0x68, 0x88, 0xe9, 0x1f, 0xa5, 0x49, 0x59, 0x9c, 0x62, 0xe1, 0xda, - 0xa4, 0x5f, 0x99, 0xc2, 0xb5, 0x93, 0x24, 0x87, 0xd6, 0x79, 0xea, 0x92, 0xfb, 0xca, 0xe4, 0x7a, - 0xb3, 0xfd, 0x1c, 0x6b, 0xf8, 0x1a, 0x6b, 0xf8, 0x1e, 0x6b, 0x78, 0x59, 0xa7, 0x4b, 0x5d, 0xff, - 0x04, 0x00, 0x00, 0xff, 0xff, 0x8f, 0xed, 0x9f, 0xb6, 0x38, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go b/vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go index e5f10b5e0d..04d39c2f95 100644 --- a/vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go +++ b/vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go @@ -1,3 +1,3 @@ -//go:generate protoc --gogofast_out=import_path=github.com/docker/docker/api/types/plugins/logdriver:. entry.proto +//go:generate protoc --gogofaster_out=import_path=github.com/docker/docker/api/types/plugins/logdriver:. entry.proto package logdriver // import "github.com/docker/docker/api/types/plugins/logdriver" diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index b83f5d7b2e..05cb31075f 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -92,7 +92,9 @@ type SearchResult struct { IsOfficial bool `json:"is_official"` // Name is the name of the repository Name string `json:"name"` - // IsAutomated indicates whether the result is automated + // IsAutomated indicates whether the result is automated. + // + // Deprecated: the "is_automated" field is deprecated and will always be "false" in the future. IsAutomated bool `json:"is_automated"` // Description is a textual description of the repository Description string `json:"description"` diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index af5e1c0bc2..65f61d2d20 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -32,6 +32,42 @@ type SELinuxContext struct { Level string } +// SeccompMode is the type used for the enumeration of possible seccomp modes +// in SeccompOpts +type SeccompMode string + +const ( + SeccompModeDefault SeccompMode = "default" + SeccompModeUnconfined SeccompMode = "unconfined" + SeccompModeCustom SeccompMode = "custom" +) + +// SeccompOpts defines the options for configuring seccomp on a swarm-managed +// container. +type SeccompOpts struct { + // Mode is the SeccompMode used for the container. + Mode SeccompMode `json:",omitempty"` + // Profile is the custom seccomp profile as a json object to be used with + // the container. Mode should be set to SeccompModeCustom when using a + // custom profile in this manner. 
+ Profile []byte `json:",omitempty"` +} + +// AppArmorMode is type used for the enumeration of possible AppArmor modes in +// AppArmorOpts +type AppArmorMode string + +const ( + AppArmorModeDefault AppArmorMode = "default" + AppArmorModeDisabled AppArmorMode = "disabled" +) + +// AppArmorOpts defines the options for configuring AppArmor on a swarm-managed +// container. Currently, custom AppArmor profiles are not supported. +type AppArmorOpts struct { + Mode AppArmorMode `json:",omitempty"` +} + // CredentialSpec for managed service account (Windows only) type CredentialSpec struct { Config string @@ -41,8 +77,11 @@ type CredentialSpec struct { // Privileges defines the security options for the container. type Privileges struct { - CredentialSpec *CredentialSpec - SELinuxContext *SELinuxContext + CredentialSpec *CredentialSpec + SELinuxContext *SELinuxContext + Seccomp *SeccompOpts `json:",omitempty"` + AppArmor *AppArmorOpts `json:",omitempty"` + NoNewPrivileges bool } // ContainerSpec represents the spec of a container. diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go index 98c2806c31..292bd7afc8 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -1,3 +1,3 @@ -//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto +//go:generate protoc --gogofaster_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go index e45045866a..32aaf0d519 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -1,23 +1,15 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: plugin.proto -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - plugin.proto - - It has these top-level messages: - PluginSpec - PluginPrivilege -*/ package runtime -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -28,22 +20,50 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // PluginSpec defines the base payload which clients can specify for creating // a service with the plugin runtime. 
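With the Seccomp, AppArmor, and NoNewPrivileges additions to swarm.Privileges shown earlier in this file, a service spec can express container hardening directly. An illustrative sketch using only the values defined in this hunk; the helper name is a placeholder:

    package main

    import "github.com/docker/docker/api/types/swarm"

    // hardenedPrivileges requests the default seccomp profile, default AppArmor
    // confinement, and no-new-privileges for a swarm-managed container.
    func hardenedPrivileges() *swarm.Privileges {
        return &swarm.Privileges{
            Seccomp:         &swarm.SeccompOpts{Mode: swarm.SeccompModeDefault},
            AppArmor:        &swarm.AppArmorOpts{Mode: swarm.AppArmorModeDefault},
            NoNewPrivileges: true,
        }
    }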
type PluginSpec struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges,proto3" json:"privileges,omitempty"` Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_22a625af4bc1cc87, []int{0} +} +func (m *PluginSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PluginSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PluginSpec.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PluginSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginSpec.Merge(m, src) +} +func (m *PluginSpec) XXX_Size() int { + return m.Size() +} +func (m *PluginSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PluginSpec.DiscardUnknown(m) } -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } +var xxx_messageInfo_PluginSpec proto.InternalMessageInfo func (m *PluginSpec) GetName() string { if m != nil { @@ -85,13 +105,41 @@ func (m *PluginSpec) GetEnv() []string { type PluginPrivilege struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value,proto3" json:"value,omitempty"` } -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { + return fileDescriptor_22a625af4bc1cc87, []int{1} +} +func (m *PluginPrivilege) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PluginPrivilege) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PluginPrivilege.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PluginPrivilege) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginPrivilege.Merge(m, src) +} +func (m *PluginPrivilege) XXX_Size() int { + return m.Size() +} +func (m *PluginPrivilege) XXX_DiscardUnknown() { + 
xxx_messageInfo_PluginPrivilege.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginPrivilege proto.InternalMessageInfo func (m *PluginPrivilege) GetName() string { if m != nil { @@ -118,10 +166,32 @@ func init() { proto.RegisterType((*PluginSpec)(nil), "PluginSpec") proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") } + +func init() { proto.RegisterFile("plugin.proto", fileDescriptor_22a625af4bc1cc87) } + +var fileDescriptor_22a625af4bc1cc87 = []byte{ + // 225 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, + 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x9a, 0xc1, 0xc8, 0xc5, 0x15, 0x00, 0x16, + 0x08, 0x2e, 0x48, 0x4d, 0x16, 0x12, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, + 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x85, 0xc4, 0xb8, 0xd8, 0x8a, 0x52, 0x73, 0xf3, 0x4b, 0x52, 0x25, + 0x98, 0xc0, 0xa2, 0x50, 0x9e, 0x90, 0x01, 0x17, 0x57, 0x41, 0x51, 0x66, 0x59, 0x66, 0x4e, 0x6a, + 0x7a, 0x6a, 0xb1, 0x04, 0xb3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x80, 0x1e, 0xc4, 0xb0, 0x00, 0x98, + 0x44, 0x10, 0x92, 0x1a, 0x21, 0x29, 0x2e, 0x8e, 0x94, 0xcc, 0xe2, 0xc4, 0xa4, 0x9c, 0xd4, 0x14, + 0x09, 0x16, 0x05, 0x46, 0x0d, 0x8e, 0x20, 0x38, 0x5f, 0x48, 0x80, 0x8b, 0x39, 0x35, 0xaf, 0x4c, + 0x82, 0x55, 0x81, 0x59, 0x83, 0x33, 0x08, 0xc4, 0x54, 0x8a, 0xe5, 0xe2, 0x47, 0x33, 0x0c, 0xab, + 0xf3, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, + 0x6e, 0x44, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x91, 0x33, + 0x08, 0xc2, 0x71, 0x92, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, + 0x70, 0xd0, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x37, 0xea, 0xe2, 0xca, 0x2a, 0x01, 0x00, + 0x00, +} + func (m *PluginSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -129,66 +199,69 @@ func (m *PluginSpec) Marshal() (dAtA []byte, err error) { } func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PluginSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Remote) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i += copy(dAtA[i:], m.Remote) - } - if len(m.Privileges) > 0 { - for _, msg := range m.Privileges { - dAtA[i] = 0x1a - i++ - i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Env[iNdEx]) + copy(dAtA[i:], m.Env[iNdEx]) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Env[iNdEx]))) + i-- + dAtA[i] = 0x2a } } if m.Disabled { - dAtA[i] = 0x20 - i++ + i-- if m.Disabled { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x20 } - if len(m.Env) > 0 { - for _, s := range m.Env { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Privileges) > 0 { + for iNdEx := 
len(m.Privileges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Privileges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPlugin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - return i, nil + if len(m.Remote) > 0 { + i -= len(m.Remote) + copy(dAtA[i:], m.Remote) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -196,50 +269,56 @@ func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { } func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PluginPrivilege) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if len(m.Value) > 0 { + for iNdEx := len(m.Value) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Value[iNdEx]) + copy(dAtA[i:], m.Value[iNdEx]) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Value[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } if len(m.Description) > 0 { - dAtA[i] = 0x12 - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) + i-- + dAtA[i] = 0x12 } - if len(m.Value) > 0 { - for _, s := range m.Value { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + offset -= sovPlugin(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PluginSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -269,6 +348,9 @@ func (m *PluginSpec) Size() (n int) { } func (m *PluginPrivilege) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -289,14 +371,7 @@ func (m *PluginPrivilege) Size() (n int) { } func sovPlugin(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozPlugin(x uint64) (n int) { return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -316,7 +391,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -344,7 +419,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -354,6 +429,9 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -373,7 +451,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -383,6 +461,9 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -402,7 +483,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -411,6 +492,9 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -433,7 +517,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -453,7 +537,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -463,6 +547,9 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -474,7 +561,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPlugin } if (iNdEx + skippy) > l { @@ -504,7 +591,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -532,7 +619,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -542,6 +629,9 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -561,7 +651,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -571,6 +661,9 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -590,7 +683,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -600,6 +693,9 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -611,7 +707,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthPlugin } if (iNdEx + skippy) > l { @@ -629,6 +725,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { func skipPlugin(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -660,10 +757,8 @@ func skipPlugin(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -680,75 +775,34 @@ func skipPlugin(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthPlugin } - return iNdEx, nil + iNdEx += length case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipPlugin(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPlugin + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthPlugin + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPlugin = fmt.Errorf("proto: unexpected end of group") ) - -func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } - -var fileDescriptorPlugin = []byte{ - // 256 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, - 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, - 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, - 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, - 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, - 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, - 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, - 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, - 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, - 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9, - 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, - 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, - 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, - 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, - 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 
0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, - 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto index 9ef169046b..e311b36ba2 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -1,7 +1,5 @@ syntax = "proto3"; -option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; - // PluginSpec defines the base payload which clients can specify for creating // a service with the plugin runtime. message PluginSpec { diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go index 6eb452d24d..5b6d5ec120 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -34,9 +34,9 @@ type ServiceSpec struct { UpdateConfig *UpdateConfig `json:",omitempty"` RollbackConfig *UpdateConfig `json:",omitempty"` - // Networks field in ServiceSpec is deprecated. The - // same field in TaskSpec should be used instead. - // This field will be removed in a future release. + // Networks specifies which networks the service should attach to. + // + // Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. Networks []NetworkAttachmentConfig `json:",omitempty"` EndpointSpec *EndpointSpec `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go b/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go new file mode 100644 index 0000000000..9a268ff1b9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go @@ -0,0 +1,20 @@ +package swarm + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceCreateResponse contains the information returned to a client on the +// creation of a new service. +// +// swagger:model ServiceCreateResponse +type ServiceCreateResponse struct { + + // The ID of the created service. + ID string `json:"ID,omitempty"` + + // Optional warning message. + // + // FIXME(thaJeztah): this should have "omitempty" in the generated type. + // + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/swarm/service_update_response.go similarity index 95% rename from vendor/github.com/docker/docker/api/types/service_update_response.go rename to vendor/github.com/docker/docker/api/types/swarm/service_update_response.go index 74ea64b1bb..0417467dae 100644 --- a/vendor/github.com/docker/docker/api/types/service_update_response.go +++ b/vendor/github.com/docker/docker/api/types/swarm/service_update_response.go @@ -1,4 +1,4 @@ -package types +package swarm // This file was generated by the swagger tool. 
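The ServiceSpec.Networks deprecation above points callers at the equivalent field on TaskSpec, and the create/update response models now live in the swarm package (swarm.ServiceCreateResponse, swarm.ServiceUpdateResponse). An illustrative migration sketch for the networks field; the helper name is a placeholder:

    package main

    import "github.com/docker/docker/api/types/swarm"

    // attachNetworks adds network attachments on the task template rather than
    // the deprecated ServiceSpec.Networks field.
    func attachNetworks(spec *swarm.ServiceSpec, networkIDs ...string) {
        for _, id := range networkIDs {
            spec.TaskTemplate.Networks = append(spec.TaskTemplate.Networks,
                swarm.NetworkAttachmentConfig{Target: id})
        }
    }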
// Editing this file might prove futile when you re-run the swagger generate command diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go new file mode 100644 index 0000000000..89d4a0098e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/system/info.go @@ -0,0 +1,116 @@ +package system + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" +) + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + PidsLimit bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + CgroupVersion string `json:",omitempty"` + NEventsListener int + KernelVersion string + OperatingSystem string + OSVersion string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + GenericResources []swarm.GenericResource + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + Runtimes map[string]RuntimeWithStatus + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string + ProductLicense string `json:",omitempty"` + DefaultAddressPools []NetworkAddressPool `json:",omitempty"` + CDISpecDirs []string + + // Legacy API fields for older API versions. + legacyFields + + // Warnings contains a slice of warnings that occurred while collecting + // system information. These warnings are intended to be informational + // messages for the user, and are not intended to be parsed / used for + // other purposes, as they do not have a fixed format. + Warnings []string +} + +type legacyFields struct { + ExecutionDriver string `json:",omitempty"` // Deprecated: deprecated since API v1.25, but returned for older versions. +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. 
It is used by [Info] struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string + // List of Log plugins registered + Log []string +} + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +type Commit struct { + ID string // ID is the actual commit ID of external tool. + Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. +} + +// NetworkAddressPool is a temp struct used by [Info] struct. +type NetworkAddressPool struct { + Base string + Size int +} diff --git a/vendor/github.com/docker/docker/api/types/system/runtime.go b/vendor/github.com/docker/docker/api/types/system/runtime.go new file mode 100644 index 0000000000..d077295a0d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/system/runtime.go @@ -0,0 +1,20 @@ +package system + +// Runtime describes an OCI runtime +type Runtime struct { + // "Legacy" runtime configuration for runc-compatible runtimes. + + Path string `json:"path,omitempty"` + Args []string `json:"runtimeArgs,omitempty"` + + // Shimv2 runtime configuration. Mutually exclusive with the legacy config above. + + Type string `json:"runtimeType,omitempty"` + Options map[string]interface{} `json:"options,omitempty"` +} + +// RuntimeWithStatus extends [Runtime] to hold [RuntimeStatus]. +type RuntimeWithStatus struct { + Runtime + Status map[string]string `json:"status,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/system/security_opts.go b/vendor/github.com/docker/docker/api/types/system/security_opts.go new file mode 100644 index 0000000000..edff3eb1ac --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/system/security_opts.go @@ -0,0 +1,48 @@ +package system + +import ( + "errors" + "fmt" + "strings" +) + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a +// type-safe [SecurityOpt]. +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + for _, s := range strings.Split(opt, ",") { + k, v, ok := strings.Cut(s, "=") + if !ok { + return nil, fmt.Errorf("invalid security option %q", s) + } + if k == "" || v == "" { + return nil, errors.New("invalid empty security option") + } + if k == "name" { + secopt.Name = v + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v}) + } + so = append(so, secopt) + } + return so, nil +} + +// KeyValue holds a key/value pair. 
+type KeyValue struct { + Key, Value string +} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index b413e02000..5c56a0cafe 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -1,18 +1,15 @@ package types // import "github.com/docker/docker/api/types" import ( - "errors" - "fmt" "io" "os" - "strings" "time" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/volume" "github.com/docker/go-connections/nat" @@ -80,6 +77,8 @@ type ImageInspect struct { // Container is the ID of the container that was used to create the image. // // Depending on how the image was created, this field may be empty. + // + // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. Container string // ContainerConfig is an optional field containing the configuration of the @@ -87,6 +86,8 @@ type ImageInspect struct { // // Previous versions of Docker builder used this field to store build cache, // and it is not in active use anymore. + // + // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. ContainerConfig *container.Config // DockerVersion is the version of Docker that was used to build the image. @@ -118,12 +119,7 @@ type ImageInspect struct { // VirtualSize is the total size of the image including all layers it is // composed of. // - // In versions of Docker before v1.10, this field was calculated from - // the image itself and all of its parent images. Docker v1.10 and up - // store images self-contained, and no longer use a parent-chain, making - // this field an equivalent of the Size field. - // - // Deprecated: Unused in API 1.43 and up, but kept for backward compatibility with older API versions. + // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. VirtualSize int64 `json:"VirtualSize,omitempty"` // GraphDriver holds information about the storage driver used to store the @@ -137,13 +133,7 @@ type ImageInspect struct { // Metadata of the image in the local cache. // // This information is local to the daemon, and not part of the image itself. - Metadata ImageMetadata -} - -// ImageMetadata contains engine-local data about the image -type ImageMetadata struct { - // LastTagTime is the date and time at which the image was last tagged. - LastTagTime time.Time `json:",omitempty"` + Metadata image.Metadata } // Container contains response of Engine API: @@ -237,148 +227,6 @@ type Version struct { BuildTime string `json:",omitempty"` } -// Commit holds the Git-commit (SHA1) that a binary was built from, as reported -// in the version-string of external tools, such as containerd, or runC. -type Commit struct { - ID string // ID is the actual commit ID of external tool. - Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. 
-} - -// Info contains response of Engine API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - PidsLimit bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - LoggingDriver string - CgroupDriver string - CgroupVersion string `json:",omitempty"` - NEventsListener int - KernelVersion string - OperatingSystem string - OSVersion string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - GenericResources []swarm.GenericResource - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - Runtimes map[string]Runtime - DefaultRuntime string - Swarm swarm.Info - // LiveRestoreEnabled determines whether containers should be kept - // running when the daemon is shutdown or upon daemon start if - // running containers are detected - LiveRestoreEnabled bool - Isolation container.Isolation - InitBinary string - ContainerdCommit Commit - RuncCommit Commit - InitCommit Commit - SecurityOptions []string - ProductLicense string `json:",omitempty"` - DefaultAddressPools []NetworkAddressPool `json:",omitempty"` - - // Warnings contains a slice of warnings that occurred while collecting - // system information. These warnings are intended to be informational - // messages for the user, and are not intended to be parsed / used for - // other purposes, as they do not have a fixed format. - Warnings []string -} - -// KeyValue holds a key/value pair -type KeyValue struct { - Key, Value string -} - -// NetworkAddressPool is a temp struct used by Info struct -type NetworkAddressPool struct { - Base string - Size int -} - -// SecurityOpt contains the name and options of a security option -type SecurityOpt struct { - Name string - Options []KeyValue -} - -// DecodeSecurityOptions decodes a security options string slice to a type safe -// SecurityOpt -func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { - so := []SecurityOpt{} - for _, opt := range opts { - // support output from a < 1.13 docker daemon - if !strings.Contains(opt, "=") { - so = append(so, SecurityOpt{Name: opt}) - continue - } - secopt := SecurityOpt{} - for _, s := range strings.Split(opt, ",") { - k, v, ok := strings.Cut(s, "=") - if !ok { - return nil, fmt.Errorf("invalid security option %q", s) - } - if k == "" || v == "" { - return nil, errors.New("invalid empty security option") - } - if k == "name" { - secopt.Name = v - continue - } - secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v}) - } - so = append(so, secopt) - } - return so, nil -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. 
It is used by Info struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string - // List of Log plugins registered - Log []string -} - // ExecStartCheck is a temp struct used by execStart // Config fields is part of ExecConfig in runconfig package type ExecStartCheck struct { @@ -491,17 +339,27 @@ type SummaryNetworkSettings struct { Networks map[string]*network.EndpointSettings } -// NetworkSettingsBase holds basic information about networks +// NetworkSettingsBase holds networking state for a container when inspecting it. type NetworkSettingsBase struct { - Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) - SandboxID string // SandboxID uniquely represents a container's network stack - HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - SandboxKey string // SandboxKey identifies the sandbox - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address + Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag. + SandboxID string // SandboxID uniquely represents a container's network stack + SandboxKey string // SandboxKey identifies the sandbox + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + + // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + // + // Deprecated: This field is never set and will be removed in a future release. + HairpinMode bool + // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + // + // Deprecated: This field is never set and will be removed in a future release. + LinkLocalIPv6Address string + // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + // + // Deprecated: This field is never set and will be removed in a future release. + LinkLocalIPv6PrefixLen int + SecondaryIPAddresses []network.Address // Deprecated: This field is never set and will be removed in a future release. + SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release. } // DefaultNetworkSettings holds network information @@ -594,14 +452,9 @@ type EndpointResource struct { // NetworkCreate is the expected body of the "create network" http request message type NetworkCreate struct { - // Check for networks with duplicate names. - // Network is primarily keyed based on a random ID and not on the name. - // Network name is strictly a user-friendly alias to the network - // which is uniquely identified using ID. - // And there is no guaranteed way to check for duplicates. - // Option CheckDuplicate is there to provide a best effort checking of any networks - // which has the same name but it is not guaranteed to catch all name collisions. - CheckDuplicate bool + // Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client + // package to older daemons. 
+ CheckDuplicate bool `json:",omitempty"` Driver string Scope string EnableIPv6 bool @@ -645,33 +498,6 @@ type NetworkInspectOptions struct { Verbose bool } -// Checkpoint represents the details of a checkpoint -type Checkpoint struct { - Name string // Name is the name of the checkpoint -} - -// Runtime describes an OCI runtime -type Runtime struct { - // "Legacy" runtime configuration for runc-compatible runtimes. - - Path string `json:"path,omitempty"` - Args []string `json:"runtimeArgs,omitempty"` - - // Shimv2 runtime configuration. Mutually exclusive with the legacy config above. - - Type string `json:"runtimeType,omitempty"` - Options map[string]interface{} `json:"options,omitempty"` - - // This is exposed here only for internal use - ShimConfig *ShimConfig `json:"-"` -} - -// ShimConfig is used by runtime to configure containerd shims -type ShimConfig struct { - Binary string - Opts interface{} -} - // DiskUsageObject represents an object type used for disk usage query filtering. type DiskUsageObject string @@ -697,7 +523,7 @@ type DiskUsageOptions struct { // GET "/system/df" type DiskUsage struct { LayersSize int64 - Images []*ImageSummary + Images []*image.Summary Containers []*Container Volumes []*volume.Volume BuildCache []*BuildCache @@ -721,7 +547,7 @@ type VolumesPruneReport struct { // ImagesPruneReport contains the response for Engine API: // POST "/images/prune" type ImagesPruneReport struct { - ImagesDeleted []ImageDeleteResponseItem + ImagesDeleted []image.DeleteResponse SpaceReclaimed uint64 } diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go new file mode 100644 index 0000000000..e332a7bb6d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/types_deprecated.go @@ -0,0 +1,138 @@ +package types + +import ( + "github.com/docker/docker/api/types/checkpoint" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/system" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container. +// +// Deprecated: use [checkpoint.CreateOptions]. +type CheckpointCreateOptions = checkpoint.CreateOptions + +// CheckpointListOptions holds parameters to list checkpoints for a container +// +// Deprecated: use [checkpoint.ListOptions]. +type CheckpointListOptions = checkpoint.ListOptions + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +// +// Deprecated: use [checkpoint.DeleteOptions]. +type CheckpointDeleteOptions = checkpoint.DeleteOptions + +// Checkpoint represents the details of a checkpoint when listing endpoints. +// +// Deprecated: use [checkpoint.Summary]. +type Checkpoint = checkpoint.Summary + +// Info contains response of Engine API: +// GET "/info" +// +// Deprecated: use [system.Info]. +type Info = system.Info + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +// +// Deprecated: use [system.Commit]. +type Commit = system.Commit + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. It is used by [system.Info] struct +// +// Deprecated: use [system.PluginsInfo]. +type PluginsInfo = system.PluginsInfo + +// NetworkAddressPool is a temp struct used by [system.Info] struct. +// +// Deprecated: use [system.NetworkAddressPool]. 
+type NetworkAddressPool = system.NetworkAddressPool + +// Runtime describes an OCI runtime. +// +// Deprecated: use [system.Runtime]. +type Runtime = system.Runtime + +// SecurityOpt contains the name and options of a security option. +// +// Deprecated: use [system.SecurityOpt]. +type SecurityOpt = system.SecurityOpt + +// KeyValue holds a key/value pair. +// +// Deprecated: use [system.KeyValue]. +type KeyValue = system.KeyValue + +// ImageDeleteResponseItem image delete response item. +// +// Deprecated: use [image.DeleteResponse]. +type ImageDeleteResponseItem = image.DeleteResponse + +// ImageSummary image summary. +// +// Deprecated: use [image.Summary]. +type ImageSummary = image.Summary + +// ImageMetadata contains engine-local data about the image. +// +// Deprecated: use [image.Metadata]. +type ImageMetadata = image.Metadata + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +// +// Deprecated: use [swarm.ServiceCreateResponse]. +type ServiceCreateResponse = swarm.ServiceCreateResponse + +// ServiceUpdateResponse service update response. +// +// Deprecated: use [swarm.ServiceUpdateResponse]. +type ServiceUpdateResponse = swarm.ServiceUpdateResponse + +// ContainerStartOptions holds parameters to start containers. +// +// Deprecated: use [container.StartOptions]. +type ContainerStartOptions = container.StartOptions + +// ResizeOptions holds parameters to resize a TTY. +// It can be used to resize container TTYs and +// exec process TTYs too. +// +// Deprecated: use [container.ResizeOptions]. +type ResizeOptions = container.ResizeOptions + +// ContainerAttachOptions holds parameters to attach to a container. +// +// Deprecated: use [container.AttachOptions]. +type ContainerAttachOptions = container.AttachOptions + +// ContainerCommitOptions holds parameters to commit changes into a container. +// +// Deprecated: use [container.CommitOptions]. +type ContainerCommitOptions = container.CommitOptions + +// ContainerListOptions holds parameters to list containers with. +// +// Deprecated: use [container.ListOptions]. +type ContainerListOptions = container.ListOptions + +// ContainerLogsOptions holds parameters to filter logs with. +// +// Deprecated: use [container.LogsOptions]. +type ContainerLogsOptions = container.LogsOptions + +// ContainerRemoveOptions holds parameters to remove containers. +// +// Deprecated: use [container.RemoveOptions]. +type ContainerRemoveOptions = container.RemoveOptions + +// DecodeSecurityOptions decodes a security options string slice to a type safe +// [system.SecurityOpt]. +// +// Deprecated: use [system.DecodeSecurityOptions]. 
+func DecodeSecurityOptions(opts []string) ([]system.SecurityOpt, error) { + return system.DecodeSecurityOptions(opts) +} diff --git a/vendor/github.com/docker/docker/builder/builder.go b/vendor/github.com/docker/docker/builder/builder.go index d3521ddfbb..fc855f133d 100644 --- a/vendor/github.com/docker/docker/builder/builder.go +++ b/vendor/github.com/docker/docker/builder/builder.go @@ -8,13 +8,13 @@ import ( "context" "io" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" containerpkg "github.com/docker/docker/container" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) const ( @@ -45,7 +45,6 @@ type Backend interface { CommitBuildStep(context.Context, backend.CommitConfig) (image.ID, error) // ContainerCreateWorkdir creates the workdir ContainerCreateWorkdir(containerID string) error - CreateImage(ctx context.Context, config []byte, parent string, contentStoreDigest digest.Digest) (Image, error) ImageCacheBuilder @@ -61,11 +60,9 @@ type ExecBackend interface { // ContainerAttachRaw attaches to container. ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error // ContainerCreateIgnoreImagesArgsEscaped creates a new Docker container and returns potential warnings - ContainerCreateIgnoreImagesArgsEscaped(ctx context.Context, config types.ContainerCreateConfig) (container.CreateResponse, error) + ContainerCreateIgnoreImagesArgsEscaped(ctx context.Context, config backend.ContainerCreateConfig) (container.CreateResponse, error) // ContainerRm removes a container specified by `id`. - ContainerRm(name string, config *types.ContainerRmConfig) error - // ContainerKill stops the container execution abruptly. - ContainerKill(containerID string, sig string) error + ContainerRm(name string, config *backend.ContainerRmConfig) error // ContainerStart starts a new container ContainerStart(ctx context.Context, containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error // ContainerWait stops processing until the given container is stopped. @@ -89,7 +86,7 @@ type ImageCacheBuilder interface { type ImageCache interface { // GetCache returns a reference to a cached image whose parent equals `parent` // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. - GetCache(parentID string, cfg *container.Config) (imageID string, err error) + GetCache(parentID string, cfg *container.Config, platform ocispec.Platform) (imageID string, err error) } // Image represents a Docker image used by the builder. 
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go b/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go index 965d1c6b1c..146e5bdf3d 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go @@ -3,6 +3,7 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( "fmt" "io" + "sort" "github.com/docker/docker/runconfig/opts" ) @@ -80,6 +81,7 @@ func (b *BuildArgs) WarnOnUnusedBuildArgs(out io.Writer) { } } if len(leftoverArgs) > 0 { + sort.Strings(leftoverArgs) fmt.Fprintf(out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs) } } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder.go b/vendor/github.com/docker/docker/builder/dockerfile/builder.go index 04cc07337e..804d294bb2 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/builder.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/containerd/containerd/platforms" + "github.com/containerd/log" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" @@ -23,7 +24,6 @@ import ( "github.com/moby/buildkit/frontend/dockerfile/shell" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/syncmap" ) @@ -76,7 +76,7 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) ( defer func() { if source != nil { if err := source.Close(); err != nil { - logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + log.G(ctx).Debugf("[BUILDER] failed to remove temporary context: %v", err) } } }() @@ -199,7 +199,7 @@ func (b *Builder) build(ctx context.Context, source builder.Source, dockerfile * targetIx, found := instructions.HasStage(stages, b.options.Target) if !found { buildsFailed.WithValues(metricsBuildTargetNotReachableError).Inc() - return nil, errdefs.InvalidParameter(errors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target)) + return nil, errdefs.InvalidParameter(errors.Errorf("target stage %q could not be found", b.options.Target)) } stages = stages[:targetIx+1] } @@ -283,7 +283,7 @@ func (b *Builder) dispatchDockerfileWithCancellation(ctx context.Context, parseR for _, cmd := range stage.Commands { select { case <-ctx.Done(): - logrus.Debug("Builder: build cancelled!") + log.G(ctx).Debug("Builder: build cancelled!") fmt.Fprint(b.Stdout, "Build cancelled\n") buildsFailed.WithValues(metricsBuildCanceled).Inc() return nil, errors.New("Build cancelled") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go index 7d10028575..881d2ce6bd 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package dockerfile // import "github.com/docker/docker/builder/dockerfile" diff --git a/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go b/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go index ebb261a1c4..8986c1277a 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go @@ -5,13 +5,14 @@ 
import ( "fmt" "io" - "github.com/docker/docker/api/types" + "github.com/containerd/log" + "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/stringid" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type containerManager struct { @@ -29,15 +30,15 @@ func newContainerManager(docker builder.ExecBackend) *containerManager { // Create a container func (c *containerManager) Create(ctx context.Context, runConfig *container.Config, hostConfig *container.HostConfig) (container.CreateResponse, error) { - container, err := c.backend.ContainerCreateIgnoreImagesArgsEscaped(ctx, types.ContainerCreateConfig{ + ctr, err := c.backend.ContainerCreateIgnoreImagesArgsEscaped(ctx, backend.ContainerCreateConfig{ Config: runConfig, HostConfig: hostConfig, }) if err != nil { - return container, err + return ctr, err } - c.tmpContainers[container.ID] = struct{}{} - return container, nil + c.tmpContainers[ctr.ID] = struct{}{} + return ctr, nil } var errCancelled = errors.New("build cancelled") @@ -60,9 +61,11 @@ func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr i go func() { select { case <-ctx.Done(): - logrus.Debugln("Build cancelled, killing and removing container:", cID) - c.backend.ContainerKill(cID, "") - c.removeContainer(cID, stdout) + log.G(ctx).Debugln("Build cancelled, removing container:", cID) + err = c.backend.ContainerRm(cID, &backend.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}) + if err != nil { + _, _ = fmt.Fprintf(stdout, "Removing container %s: %v\n", stringid.TruncateID(cID), err) + } cancelErrCh <- errCancelled case <-finished: cancelErrCh <- nil @@ -102,7 +105,7 @@ func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr i func logCancellationError(cancelErrCh chan error, msg string) { if cancelErr := <-cancelErrCh; cancelErr != nil { - logrus.Debugf("Build cancelled (%v): %s", cancelErr, msg) + log.G(context.TODO()).Debugf("Build cancelled (%v): %s", cancelErr, msg) } } @@ -122,25 +125,14 @@ func (e *statusCodeError) StatusCode() int { return e.code } -func (c *containerManager) removeContainer(containerID string, stdout io.Writer) error { - rmConfig := &types.ContainerRmConfig{ - ForceRemove: true, - RemoveVolume: true, - } - if err := c.backend.ContainerRm(containerID, rmConfig); err != nil { - fmt.Fprintf(stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(containerID), err) - return err - } - return nil -} - // RemoveAll containers managed by this container manager func (c *containerManager) RemoveAll(stdout io.Writer) { for containerID := range c.tmpContainers { - if err := c.removeContainer(containerID, stdout); err != nil { - return + if err := c.backend.ContainerRm(containerID, &backend.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil && !errdefs.IsNotFound(err) { + _, _ = fmt.Fprintf(stdout, "Removing intermediate container %s: %v\n", stringid.TruncateID(containerID), err) + continue } delete(c.tmpContainers, containerID) - fmt.Fprintf(stdout, "Removing intermediate container %s\n", stringid.TruncateID(containerID)) + _, _ = fmt.Fprintf(stdout, " ---> Removed intermediate container %s\n", stringid.TruncateID(containerID)) } } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/copy.go b/vendor/github.com/docker/docker/builder/dockerfile/copy.go index 
7919c972fd..62fbfb656d 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/copy.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/copy.go @@ -8,7 +8,6 @@ import ( "net/url" "os" "path/filepath" - "runtime" "sort" "strings" "time" @@ -17,13 +16,13 @@ import ( "github.com/docker/docker/builder/remotecontext" "github.com/docker/docker/builder/remotecontext/urlutil" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/longpath" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/system" "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/sys/symlink" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -45,7 +44,7 @@ type copyInfo struct { } func (c copyInfo) fullPath() (string, error) { - return containerfs.ResolveScopedPath(c.root, c.path) + return symlink.FollowSymlinkInScope(filepath.Join(c.root, c.path), c.root) } func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo { @@ -74,7 +73,7 @@ type copier struct { source builder.Source pathCache pathCache download sourceDownloader - platform *ocispec.Platform + platform ocispec.Platform // for cleanup. TODO: having copier.cleanup() is error prone and hard to // follow. Code calling performCopy should manage the lifecycle of its params. // Copier should take override source as input, not imageMount. @@ -83,19 +82,7 @@ type copier struct { } func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier { - platform := req.builder.platform - if platform == nil { - // May be nil if not explicitly set in API/dockerfile - platform = &ocispec.Platform{} - } - if platform.OS == "" { - // Default to the dispatch requests operating system if not explicit in API/dockerfile - platform.OS = req.state.operatingSystem - } - if platform.OS == "" { - // This is a failsafe just in case. Shouldn't be hit. - platform.OS = runtime.GOOS - } + platform := req.builder.getPlatform(req.state) return copier{ source: req.source, @@ -406,7 +393,7 @@ func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote b tmpFileName = unnamedFilename } tmpFileName = filepath.Join(tmpDir, tmpFileName) - tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) if err != nil { return } @@ -510,11 +497,11 @@ func copyFile(archiver *archive.Archiver, source, dest string, identity *idtools // modified for use on Windows to handle volume GUID paths. These paths // are of the form \\?\Volume{}\. 
An example would be: // \\?\Volume{dae8d3ac-b9a1-11e9-88eb-e8554b2ba1db}\bin\busybox.exe - if err := system.MkdirAll(filepath.Dir(dest), 0755); err != nil { + if err := system.MkdirAll(filepath.Dir(dest), 0o755); err != nil { return err } } else { - if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest), 0755, *identity); err != nil { + if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest), 0o755, *identity); err != nil { return errors.Wrapf(err, "failed to create new directory") } } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go index d13d7a9890..a90afb98cb 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package dockerfile // import "github.com/docker/docker/builder/dockerfile" diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go index 663456734e..7183042012 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go @@ -22,7 +22,6 @@ import ( "github.com/docker/docker/errdefs" "github.com/docker/docker/image" "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/system" "github.com/docker/go-connections/nat" "github.com/moby/buildkit/frontend/dockerfile/instructions" "github.com/moby/buildkit/frontend/dockerfile/parser" @@ -331,8 +330,8 @@ func dispatchWorkdir(ctx context.Context, d dispatchRequest, c *instructions.Wor // RUN echo hi # cmd /S /C echo hi (Windows) // RUN [ "echo", "hi" ] # echo hi func dispatchRun(ctx context.Context, d dispatchRequest, c *instructions.RunCommand) error { - if !system.IsOSSupported(d.state.operatingSystem) { - return system.ErrNotSupportedOperatingSystem + if err := image.CheckOS(d.state.operatingSystem); err != nil { + return err } if len(c.FlagsUsed) > 0 { @@ -349,9 +348,16 @@ func dispatchRun(ctx context.Context, d dispatchRequest, c *instructions.RunComm saveCmd = prependEnvOnCmd(d.state.buildArgs, buildArgs, cmdFromArgs) } + cacheArgsEscaped := argsEscaped + // ArgsEscaped is not persisted in the committed image on Windows. + // Use the original from previous build steps for cache probing. 
+ if d.state.operatingSystem == "windows" { + cacheArgsEscaped = stateRunConfig.ArgsEscaped + } + runConfigForCacheProbe := copyRunConfig(stateRunConfig, withCmd(saveCmd), - withArgsEscaped(argsEscaped), + withArgsEscaped(cacheArgsEscaped), withEntrypointOverride(saveCmd, nil)) if hit, err := d.builder.probeCache(d.state, runConfigForCacheProbe); err != nil || hit { return err diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go index 87dbe72192..ba8e1d9053 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package dockerfile // import "github.com/docker/docker/builder/dockerfile" diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go index 88fa896f67..34cf93de16 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go @@ -108,7 +108,6 @@ func normalizeWorkdirWindows(current string, requested string) (string, error) { // // The commands when this function is called are RUN, ENTRYPOINT and CMD. func resolveCmdLine(cmd instructions.ShellDependantCmdLine, runConfig *container.Config, os, command, original string) ([]string, bool) { - // Make sure we return an empty array if there is no cmd.CmdLine if len(cmd.CmdLine) == 0 { return []string{}, runConfig.ArgsEscaped diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go index 9d5601528b..3b6f1bd153 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go @@ -28,8 +28,8 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" "github.com/docker/docker/errdefs" + "github.com/docker/docker/image" "github.com/docker/docker/oci" - "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig/opts" "github.com/moby/buildkit/frontend/dockerfile/instructions" "github.com/moby/buildkit/frontend/dockerfile/shell" @@ -213,21 +213,21 @@ func (s *dispatchState) hasFromImage() bool { return s.imageID != "" || (s.baseImage != nil && s.baseImage.ImageID() == "") } -func (s *dispatchState) beginStage(stageName string, image builder.Image) error { +func (s *dispatchState) beginStage(stageName string, img builder.Image) error { s.stageName = stageName - s.imageID = image.ImageID() - s.operatingSystem = image.OperatingSystem() - if !system.IsOSSupported(s.operatingSystem) { - return system.ErrNotSupportedOperatingSystem + s.imageID = img.ImageID() + s.operatingSystem = img.OperatingSystem() + if err := image.CheckOS(s.operatingSystem); err != nil { + return err } - if image.RunConfig() != nil { + if img.RunConfig() != nil { // copy avoids referencing the same instance when 2 stages have the same base - s.runConfig = copyRunConfig(image.RunConfig()) + s.runConfig = copyRunConfig(img.RunConfig()) } else { s.runConfig = &container.Config{} } - s.baseImage = image + s.baseImage = img s.setDefaultPath() s.runConfig.OpenStdin = false s.runConfig.StdinOnce = false diff --git a/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go 
b/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go index 7dada66596..e943c22951 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go @@ -5,12 +5,12 @@ import ( "runtime" "github.com/containerd/containerd/platforms" + "github.com/containerd/log" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/builder" dockerimage "github.com/docker/docker/image" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type getAndMountFunc func(context.Context, string, bool, *ocispec.Platform) (builder.Image, builder.ROLayer, error) @@ -64,7 +64,7 @@ func (m *imageSources) Get(ctx context.Context, idOrRef string, localOnly bool, func (m *imageSources) Unmount() (retErr error) { for _, im := range m.mounts { if err := im.unmount(); err != nil { - logrus.Error(err) + log.G(context.TODO()).Error(err) retErr = err } } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go b/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go index 5ef622193b..de00c912e8 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go @@ -3,16 +3,17 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( "context" + "github.com/containerd/log" "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" - "github.com/sirupsen/logrus" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // ImageProber exposes an Image cache to the Builder. It supports resetting a // cache. type ImageProber interface { Reset(ctx context.Context) error - Probe(parentID string, runConfig *container.Config) (string, error) + Probe(parentID string, runConfig *container.Config, platform ocispec.Platform) (string, error) } type resetFunc func(context.Context) (builder.ImageCache, error) @@ -51,20 +52,20 @@ func (c *imageProber) Reset(ctx context.Context) error { // Probe checks if cache match can be found for current build instruction. 
// It returns the cachedID if there is a hit, and the empty string on miss -func (c *imageProber) Probe(parentID string, runConfig *container.Config) (string, error) { +func (c *imageProber) Probe(parentID string, runConfig *container.Config, platform ocispec.Platform) (string, error) { if c.cacheBusted { return "", nil } - cacheID, err := c.cache.GetCache(parentID, runConfig) + cacheID, err := c.cache.GetCache(parentID, runConfig, platform) if err != nil { return "", err } if len(cacheID) == 0 { - logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd) + log.G(context.TODO()).Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd) c.cacheBusted = true return "", nil } - logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd) + log.G(context.TODO()).Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd) return cacheID, nil } @@ -74,6 +75,6 @@ func (c *nopProber) Reset(ctx context.Context) error { return nil } -func (c *nopProber) Probe(_ string, _ *container.Config) (string, error) { +func (c *nopProber) Probe(_ string, _ *container.Config, _ ocispec.Platform) (string, error) { return "", nil } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals.go b/vendor/github.com/docker/docker/builder/dockerfile/internals.go index 050deb1aad..b6b0b8f42a 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals.go @@ -10,6 +10,8 @@ import ( "fmt" "strings" + "github.com/containerd/containerd/platforms" + "github.com/containerd/log" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" @@ -21,7 +23,6 @@ import ( "github.com/docker/go-connections/nat" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) func (b *Builder) getArchiver() *archive.Archiver { @@ -122,7 +123,7 @@ func (b *Builder) performCopy(ctx context.Context, req dispatchRequest, inst cop var chownComment string if inst.chownStr != "" { - chownComment = fmt.Sprintf("--chown=%s", inst.chownStr) + chownComment = fmt.Sprintf("--chown=%s ", inst.chownStr) } commentStr := fmt.Sprintf("%s %s%s in %s ", inst.cmdName, chownComment, srcHash, inst.dest) @@ -328,7 +329,7 @@ func getShell(c *container.Config, os string) []string { } func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) { - cachedID, err := b.imageProber.Probe(dispatchState.imageID, runConfig) + cachedID, err := b.imageProber.Probe(dispatchState.imageID, runConfig, b.getPlatform(dispatchState)) if cachedID == "" || err != nil { return false, err } @@ -348,7 +349,7 @@ func (b *Builder) probeAndCreate(ctx context.Context, dispatchState *dispatchSta } func (b *Builder) create(ctx context.Context, runConfig *container.Config) (string, error) { - logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd) + log.G(ctx).Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd) hostConfig := hostConfigFromOptions(b.options) container, err := b.containerManager.Create(ctx, runConfig, hostConfig) @@ -388,3 +389,17 @@ func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConf } return hc } + +func (b *Builder) getPlatform(state *dispatchState) ocispec.Platform { + // May be nil if not explicitly set in API/dockerfile + out := platforms.DefaultSpec() + if b.platform != nil { + out = *b.platform + } + + if state.operatingSystem != "" { + out.OS = state.operatingSystem 
+ } + + return out +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go index 37e096590f..4af7376264 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go @@ -8,7 +8,7 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/moby/sys/symlink" - lcUser "github.com/opencontainers/runc/libcontainer/user" + "github.com/moby/sys/user" "github.com/pkg/errors" ) @@ -57,7 +57,7 @@ func lookupUser(userStr, filepath string) (int, error) { if err == nil { return uid, nil } - users, err := lcUser.ParsePasswdFileFilter(filepath, func(u lcUser.User) bool { + users, err := user.ParsePasswdFileFilter(filepath, func(u user.User) bool { return u.Name == userStr }) if err != nil { @@ -76,7 +76,7 @@ func lookupGroup(groupStr, filepath string) (int, error) { if err == nil { return gid, nil } - groups, err := lcUser.ParseGroupFileFilter(filepath, func(g lcUser.Group) bool { + groups, err := user.ParseGroupFileFilter(filepath, func(g user.Group) bool { return g.Name == groupStr }) if err != nil { diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go index 53c302c714..b21c7e8a8c 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go @@ -45,7 +45,6 @@ func getAccountIdentity(ctx context.Context, builder *Builder, accountName strin // Check if the account name is one unique to containers. if strings.EqualFold(accountName, "ContainerAdministrator") { return idtools.Identity{SID: idtools.ContainerAdministratorSidString}, nil - } else if strings.EqualFold(accountName, "ContainerUser") { return idtools.Identity{SID: idtools.ContainerUserSidString}, nil } @@ -56,7 +55,6 @@ func getAccountIdentity(ctx context.Context, builder *Builder, accountName strin } func lookupNTAccount(ctx context.Context, builder *Builder, accountName string, state *dispatchState) (idtools.Identity, error) { - source, _ := filepath.Split(os.Args[0]) target := "C:\\Docker" @@ -72,14 +70,15 @@ func lookupNTAccount(ctx context.Context, builder *Builder, accountName string, runConfig.Cmd = []string{targetExecutable, "getaccountsid", accountName} - hostConfig := &container.HostConfig{Mounts: []mount.Mount{ - { - Type: mount.TypeBind, - Source: source, - Target: target, - ReadOnly: true, + hostConfig := &container.HostConfig{ + Mounts: []mount.Mount{ + { + Type: mount.TypeBind, + Source: source, + Target: target, + ReadOnly: true, + }, }, - }, } container, err := builder.containerManager.Create(ctx, runConfig, hostConfig) diff --git a/vendor/github.com/docker/docker/builder/remotecontext/archive.go b/vendor/github.com/docker/docker/builder/remotecontext/archive.go index bfe937b9aa..5201feedd3 100644 --- a/vendor/github.com/docker/docker/builder/remotecontext/archive.go +++ b/vendor/github.com/docker/docker/builder/remotecontext/archive.go @@ -8,9 +8,10 @@ import ( "github.com/docker/docker/builder" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/longpath" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/tarsum" + "github.com/moby/sys/symlink" "github.com/pkg/errors" ) @@ -24,9 +25,11 @@ func (c 
*archiveContext) Close() error { } func convertPathError(err error, cleanpath string) error { - if err, ok := err.(*os.PathError); ok { + switch err := err.(type) { + case *os.PathError: + err.Path = cleanpath + case *system.XattrError: err.Path = cleanpath - return err } return err } @@ -117,7 +120,7 @@ func (c *archiveContext) Hash(path string) (string, error) { func normalize(path string, root string) (cleanPath, fullPath string, err error) { cleanPath = filepath.Clean(string(filepath.Separator) + path)[1:] - fullPath, err = containerfs.ResolveScopedPath(root, path) + fullPath, err = symlink.FollowSymlinkInScope(filepath.Join(root, path), root) if err != nil { return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath) } diff --git a/vendor/github.com/docker/docker/builder/remotecontext/detect.go b/vendor/github.com/docker/docker/builder/remotecontext/detect.go index a7b991f416..36c7569f3c 100644 --- a/vendor/github.com/docker/docker/builder/remotecontext/detect.go +++ b/vendor/github.com/docker/docker/builder/remotecontext/detect.go @@ -2,23 +2,25 @@ package remotecontext // import "github.com/docker/docker/builder/remotecontext" import ( "bufio" + "context" "fmt" "io" "os" + "path/filepath" "runtime" "strings" "github.com/containerd/continuity/driver" + "github.com/containerd/log" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/builder" "github.com/docker/docker/builder/remotecontext/urlutil" "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/containerfs" "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/moby/patternmatcher" "github.com/moby/patternmatcher/ignorefile" + "github.com/moby/sys/symlink" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // ClientSessionRemote is identifier for client-session context transport @@ -102,7 +104,7 @@ func newURLRemote(url string, dockerfilePath string, progressReader func(in io.R defer content.Close() switch contentType { - case mimeTypes.TextPlain: + case mimeTypeTextPlain: res, err := parser.Parse(progressReader(content)) return nil, res, errdefs.InvalidParameter(err) default: @@ -133,7 +135,7 @@ func removeDockerfile(c modifiableContext, filesToRemove ...string) error { for _, fileToRemove := range filesToRemove { if rm, _ := patternmatcher.MatchesOrParentMatches(fileToRemove, excludes); rm { if err := c.Remove(fileToRemove); err != nil { - logrus.Errorf("failed to remove %s: %v", fileToRemove, err) + log.G(context.TODO()).Errorf("failed to remove %s: %v", fileToRemove, err) } } } @@ -176,7 +178,8 @@ func StatAt(remote builder.Source, path string) (os.FileInfo, error) { // FullPath is a helper for getting a full path for a path from a source func FullPath(remote builder.Source, path string) (string, error) { - fullPath, err := containerfs.ResolveScopedPath(remote.Root(), path) + remoteRoot := remote.Root() + fullPath, err := symlink.FollowSymlinkInScope(filepath.Join(remoteRoot, path), remoteRoot) if err != nil { if runtime.GOOS == "windows" { return "", fmt.Errorf("failed to resolve scoped path %s (%s): %s. 
Possible cause is a forbidden path outside the build context", path, fullPath, err) diff --git a/vendor/github.com/docker/docker/builder/remotecontext/generate.go b/vendor/github.com/docker/docker/builder/remotecontext/generate.go deleted file mode 100644 index 84c1b3b5ea..0000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package remotecontext // import "github.com/docker/docker/builder/remotecontext" - -//go:generate protoc --gogoslick_out=. tarsum.proto diff --git a/vendor/github.com/docker/docker/builder/remotecontext/git.go b/vendor/github.com/docker/docker/builder/remotecontext/git.go index 85efba24f3..b22416fb5b 100644 --- a/vendor/github.com/docker/docker/builder/remotecontext/git.go +++ b/vendor/github.com/docker/docker/builder/remotecontext/git.go @@ -1,12 +1,13 @@ package remotecontext // import "github.com/docker/docker/builder/remotecontext" import ( + "context" "os" + "github.com/containerd/log" "github.com/docker/docker/builder" "github.com/docker/docker/builder/remotecontext/git" "github.com/docker/docker/pkg/archive" - "github.com/sirupsen/logrus" ) // MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. @@ -24,11 +25,11 @@ func MakeGitContext(gitURL string) (builder.Source, error) { defer func() { err := c.Close() if err != nil { - logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while closing git context") + log.G(context.TODO()).WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while closing git context") } err = os.RemoveAll(root) if err != nil { - logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while removing path and children of root") + log.G(context.TODO()).WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while removing path and children of root") } }() return FromArchive(c) diff --git a/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go b/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go index f9b2b4b9c4..4270e86ef5 100644 --- a/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go +++ b/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go @@ -35,7 +35,6 @@ func WithIsolatedConfig(v bool) CloneOption { // will be under "docker-build-git" func Clone(remoteURL string, opts ...CloneOption) (string, error) { repo, err := parseRemoteURL(remoteURL) - if err != nil { return "", err } diff --git a/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go b/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go index e8a6210e9c..3d29b0d476 100644 --- a/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go +++ b/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go @@ -5,11 +5,11 @@ import ( "net/http" ) -// mimeTypes stores the MIME content type. -var mimeTypes = struct { - TextPlain string - OctetStream string -}{"text/plain", "application/octet-stream"} +// MIME content types. +const ( + mimeTypeTextPlain = "text/plain" + mimeTypeOctetStream = "application/octet-stream" +) // detectContentType returns a best guess representation of the MIME // content type for the bytes at c. 
The value detected by @@ -17,11 +17,10 @@ var mimeTypes = struct { // application/octet-stream when a better guess cannot be made. The // result of this detection is then run through mime.ParseMediaType() // which separates the actual MIME string from any parameters. -func detectContentType(c []byte) (string, map[string]string, error) { - ct := http.DetectContentType(c) - contentType, args, err := mime.ParseMediaType(ct) +func detectContentType(c []byte) (string, error) { + contentType, _, err := mime.ParseMediaType(http.DetectContentType(c)) if err != nil { - return "", nil, err + return "", err } - return contentType, args, nil + return contentType, nil } diff --git a/vendor/github.com/docker/docker/builder/remotecontext/remote.go b/vendor/github.com/docker/docker/builder/remotecontext/remote.go index 8f09ed0997..6bac5d1d62 100644 --- a/vendor/github.com/docker/docker/builder/remotecontext/remote.go +++ b/vendor/github.com/docker/docker/builder/remotecontext/remote.go @@ -105,8 +105,8 @@ func inspectResponse(ct string, r io.Reader, clen int64) (string, io.Reader, err // content type for files without an extension (e.g. 'Dockerfile') // so if we receive this value we better check for text content contentType := ct - if len(ct) == 0 || ct == mimeTypes.OctetStream { - contentType, _, err = detectContentType(preamble) + if len(ct) == 0 || ct == mimeTypeOctetStream { + contentType, err = detectContentType(preamble) if err != nil { return contentType, bodyReader, err } diff --git a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go deleted file mode 100644 index 19e409eee5..0000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go +++ /dev/null @@ -1,161 +0,0 @@ -package remotecontext // import "github.com/docker/docker/builder/remotecontext" - -import ( - "os" - "path/filepath" - "sync" - - iradix "github.com/hashicorp/go-immutable-radix" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil" -) - -type hashed interface { - Digest() digest.Digest -} - -// CachableSource is a source that contains cache records for its contents. -// -// Deprecated: this type was used for the experimental "stream" support for the classic builder, which is no longer supported. -type CachableSource struct { - mu sync.Mutex - root string - tree *iradix.Tree - txn *iradix.Txn -} - -// NewCachableSource creates new CachableSource. -// -// Deprecated: this type was used for the experimental "stream" support for the classic builder, which is no longer supported. 
-func NewCachableSource(root string) *CachableSource { - ts := &CachableSource{ - tree: iradix.New(), - root: root, - } - return ts -} - -// MarshalBinary marshals current cache information to a byte array -func (cs *CachableSource) MarshalBinary() ([]byte, error) { - b := TarsumBackup{Hashes: make(map[string]string)} - root := cs.getRoot() - root.Walk(func(k []byte, v interface{}) bool { - b.Hashes[string(k)] = v.(*fileInfo).sum - return false - }) - return b.Marshal() -} - -// UnmarshalBinary decodes cache information for presented byte array -func (cs *CachableSource) UnmarshalBinary(data []byte) error { - var b TarsumBackup - if err := b.Unmarshal(data); err != nil { - return err - } - txn := iradix.New().Txn() - for p, v := range b.Hashes { - txn.Insert([]byte(p), &fileInfo{sum: v}) - } - cs.mu.Lock() - defer cs.mu.Unlock() - cs.tree = txn.Commit() - return nil -} - -// Scan rescans the cache information from the file system -func (cs *CachableSource) Scan() error { - lc, err := NewLazySource(cs.root) - if err != nil { - return err - } - txn := iradix.New().Txn() - err = filepath.WalkDir(cs.root, func(path string, _ os.DirEntry, err error) error { - if err != nil { - return errors.Wrapf(err, "failed to walk %s", path) - } - rel, err := Rel(cs.root, path) - if err != nil { - return err - } - h, err := lc.Hash(rel) - if err != nil { - return err - } - txn.Insert([]byte(rel), &fileInfo{sum: h}) - return nil - }) - if err != nil { - return err - } - cs.mu.Lock() - defer cs.mu.Unlock() - cs.tree = txn.Commit() - return nil -} - -// HandleChange notifies the source about a modification operation -func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { - cs.mu.Lock() - if cs.txn == nil { - cs.txn = cs.tree.Txn() - } - if kind == fsutil.ChangeKindDelete { - cs.txn.Delete([]byte(p)) - cs.mu.Unlock() - return - } - - h, ok := fi.(hashed) - if !ok { - cs.mu.Unlock() - return errors.Errorf("invalid fileinfo: %s", p) - } - - hfi := &fileInfo{ - sum: h.Digest().Encoded(), - } - cs.txn.Insert([]byte(p), hfi) - cs.mu.Unlock() - return nil -} - -func (cs *CachableSource) getRoot() *iradix.Node { - cs.mu.Lock() - if cs.txn != nil { - cs.tree = cs.txn.Commit() - cs.txn = nil - } - t := cs.tree - cs.mu.Unlock() - return t.Root() -} - -// Close closes the source -func (cs *CachableSource) Close() error { - return nil -} - -// Hash returns a hash for a single file in the source -func (cs *CachableSource) Hash(path string) (string, error) { - n := cs.getRoot() - // TODO: check this for symlinks - v, ok := n.Get([]byte(path)) - if !ok { - return path, nil - } - return v.(*fileInfo).sum, nil -} - -// Root returns a root directory for the source -func (cs *CachableSource) Root() string { - return cs.root -} - -type fileInfo struct { - sum string -} - -func (fi *fileInfo) Hash() string { - return fi.sum -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go deleted file mode 100644 index 1d23bbe65b..0000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go +++ /dev/null @@ -1,525 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: tarsum.proto -// DO NOT EDIT! - -/* -Package remotecontext is a generated protocol buffer package. 
- -It is generated from these files: - tarsum.proto - -It has these top-level messages: - TarsumBackup -*/ -package remotecontext - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type TarsumBackup struct { - Hashes map[string]string `protobuf:"bytes,1,rep,name=Hashes" json:"Hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *TarsumBackup) Reset() { *m = TarsumBackup{} } -func (*TarsumBackup) ProtoMessage() {} -func (*TarsumBackup) Descriptor() ([]byte, []int) { return fileDescriptorTarsum, []int{0} } - -func (m *TarsumBackup) GetHashes() map[string]string { - if m != nil { - return m.Hashes - } - return nil -} - -func init() { - proto.RegisterType((*TarsumBackup)(nil), "remotecontext.TarsumBackup") -} -func (this *TarsumBackup) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*TarsumBackup) - if !ok { - that2, ok := that.(TarsumBackup) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if len(this.Hashes) != len(that1.Hashes) { - return false - } - for i := range this.Hashes { - if this.Hashes[i] != that1.Hashes[i] { - return false - } - } - return true -} -func (this *TarsumBackup) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&remotecontext.TarsumBackup{") - keysForHashes := make([]string, 0, len(this.Hashes)) - for k := range this.Hashes { - keysForHashes = append(keysForHashes, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) - mapStringForHashes := "map[string]string{" - for _, k := range keysForHashes { - mapStringForHashes += fmt.Sprintf("%#v: %#v,", k, this.Hashes[k]) - } - mapStringForHashes += "}" - if this.Hashes != nil { - s = append(s, "Hashes: "+mapStringForHashes+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringTarsum(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *TarsumBackup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Hashes) > 0 { - for k := range m.Hashes { - dAtA[i] = 0xa - i++ - v := m.Hashes[k] - mapSize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) - i = encodeVarintTarsum(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintTarsum(dAtA, i, uint64(len(k))) - i += 
copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintTarsum(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil -} - -func encodeFixed64Tarsum(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Tarsum(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintTarsum(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *TarsumBackup) Size() (n int) { - var l int - _ = l - if len(m.Hashes) > 0 { - for k, v := range m.Hashes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) - n += mapEntrySize + 1 + sovTarsum(uint64(mapEntrySize)) - } - } - return n -} - -func sovTarsum(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozTarsum(x uint64) (n int) { - return sovTarsum(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *TarsumBackup) String() string { - if this == nil { - return "nil" - } - keysForHashes := make([]string, 0, len(this.Hashes)) - for k := range this.Hashes { - keysForHashes = append(keysForHashes, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) - mapStringForHashes := "map[string]string{" - for _, k := range keysForHashes { - mapStringForHashes += fmt.Sprintf("%v: %v,", k, this.Hashes[k]) - } - mapStringForHashes += "}" - s := strings.Join([]string{`&TarsumBackup{`, - `Hashes:` + mapStringForHashes + `,`, - `}`, - }, "") - return s -} -func valueToStringTarsum(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *TarsumBackup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TarsumBackup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TarsumBackup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hashes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTarsum - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthTarsum - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Hashes == nil { - m.Hashes = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthTarsum - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Hashes[mapkey] = mapvalue - } else { - var mapvalue string - m.Hashes[mapkey] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTarsum(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTarsum - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTarsum(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTarsum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTarsum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTarsum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthTarsum - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTarsum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - 
break - } - next, err := skipTarsum(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthTarsum = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTarsum = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("tarsum.proto", fileDescriptorTarsum) } - -var fileDescriptorTarsum = []byte{ - // 196 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x49, 0x2c, 0x2a, - 0x2e, 0xcd, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2d, 0x4a, 0xcd, 0xcd, 0x2f, 0x49, - 0x4d, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0x51, 0xea, 0x62, 0xe4, 0xe2, 0x09, 0x01, 0xcb, 0x3b, - 0x25, 0x26, 0x67, 0x97, 0x16, 0x08, 0xd9, 0x73, 0xb1, 0x79, 0x24, 0x16, 0x67, 0xa4, 0x16, 0x4b, - 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0xa1, 0x68, 0xd0, 0x43, 0x56, 0xac, 0x07, 0x51, - 0xe9, 0x9a, 0x57, 0x52, 0x54, 0x19, 0x04, 0xd5, 0x26, 0x65, 0xc9, 0xc5, 0x8d, 0x24, 0x2c, 0x24, - 0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x62, 0x0a, 0x89, - 0x70, 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b, - 0x46, 0x27, 0x9d, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1, - 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, - 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, - 0xc7, 0x90, 0xc4, 0x06, 0xf6, 0x90, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x89, 0x57, 0x7d, 0x3f, - 0xe0, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto deleted file mode 100644 index cb94240ba8..0000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto +++ /dev/null @@ -1,7 +0,0 @@ -syntax = "proto3"; - -package remotecontext; // no namespace because only used internally - -message TarsumBackup { - map Hashes = 1; -} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/builder/remotecontext/urlutil/urlutil.go b/vendor/github.com/docker/docker/builder/remotecontext/urlutil/urlutil.go index e38988a30c..e8459cc820 100644 --- a/vendor/github.com/docker/docker/builder/remotecontext/urlutil/urlutil.go +++ b/vendor/github.com/docker/docker/builder/remotecontext/urlutil/urlutil.go @@ -12,7 +12,7 @@ import ( // urlPathWithFragmentSuffix matches fragments to use as Git reference and build // context from the Git repository. See IsGitURL for details. -var urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") +var urlPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`) // IsURL returns true if the provided str is an HTTP(S) URL by checking if it // has a http:// or https:// scheme. No validation is performed to verify if the diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md index 992f18117d..f8af3ab903 100644 --- a/vendor/github.com/docker/docker/client/README.md +++ b/vendor/github.com/docker/docker/client/README.md @@ -1,8 +1,10 @@ # Go client for the Docker Engine API -The `docker` command uses this package to communicate with the daemon. 
It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. +The `docker` command uses this package to communicate with the daemon. It can +also be used by your own Go applications to do anything the command-line +interface does – running containers, pulling images, managing swarms, etc. -For example, to list running containers (the equivalent of `docker ps`): +For example, to list all containers (the equivalent of `docker ps --all`): ```go package main @@ -11,25 +13,26 @@ import ( "context" "fmt" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" ) func main() { - cli, err := client.NewClientWithOpts(client.FromEnv) + apiClient, err := client.NewClientWithOpts(client.FromEnv) if err != nil { panic(err) } + defer apiClient.Close() - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + containers, err := apiClient.ContainerList(context.Background(), container.ListOptions{All: true}) if err != nil { panic(err) } - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) + for _, ctr := range containers { + fmt.Printf("%s %s (status: %s)\n", ctr.ID, ctr.Image, ctr.Status) } } ``` -[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) +[Full documentation is available on pkg.go.dev.](https://pkg.go.dev/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go index 2b6606236e..1a830f4135 100644 --- a/vendor/github.com/docker/docker/client/build_prune.go +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -13,7 +13,7 @@ import ( // BuildCachePrune requests the daemon to delete unused cache data func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { - if err := cli.NewVersionError("1.31", "build prune"); err != nil { + if err := cli.NewVersionError(ctx, "1.31", "build prune"); err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go index 921024fe4f..9746d288df 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -3,11 +3,11 @@ package client // import "github.com/docker/docker/client" import ( "context" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/checkpoint" ) // CheckpointCreate creates a checkpoint from the given container with the given name -func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error { resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go index 54f55fa76e..b968c2b237 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -4,11 +4,11 @@ import ( "context" "net/url" - "github.com/docker/docker/api/types" + 
"github.com/docker/docker/api/types/checkpoint" ) // CheckpointDelete deletes the checkpoint with the given name from the given container -func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options checkpoint.DeleteOptions) error { query := url.Values{} if options.CheckpointDir != "" { query.Set("dir", options.CheckpointDir) diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go index 39cfb959ff..8feb1f3f7d 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -5,12 +5,12 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/checkpoint" ) // CheckpointList returns the checkpoints of the given container in the docker host -func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { - var checkpoints []types.Checkpoint +func (cli *Client) CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) { + var checkpoints []checkpoint.Summary query := url.Values{} if options.CheckpointDir != "" { diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 54fa36cca8..0b496b0fa6 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -19,7 +19,7 @@ For example, to list running containers (the equivalent of "docker ps"): "context" "fmt" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" ) @@ -29,13 +29,13 @@ For example, to list running containers (the equivalent of "docker ps"): panic(err) } - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + containers, err := cli.ContainerList(context.Background(), container.ListOptions{}) if err != nil { panic(err) } - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) + for _, ctr := range containers { + fmt.Printf("%s %s\n", ctr.ID, ctr.Image) } } */ @@ -43,17 +43,21 @@ package client // import "github.com/docker/docker/client" import ( "context" + "crypto/tls" "net" "net/http" "net/url" "path" "strings" + "time" "github.com/docker/docker/api" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" "github.com/docker/go-connections/sockets" "github.com/pkg/errors" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel/trace" ) // DummyHost is a hostname used for local communication. @@ -86,8 +90,12 @@ import ( // [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569 const DummyHost = "api.moby.localhost" -// ErrRedirect is the error returned by checkRedirect when the request is non-GET. -var ErrRedirect = errors.New("unexpected redirect in response") +// fallbackAPIVersion is the version to fallback to if API-version negotiation +// fails. This version is the highest version of the API before API-version +// negotiation was introduced. 
If negotiation fails (or no API version was +// included in the API response), we assume the API server uses the most +// recent version before negotiation was introduced. +const fallbackAPIVersion = "1.24" // Client is the API client that performs all operations // against a docker server. @@ -106,7 +114,12 @@ type Client struct { client *http.Client // version of the server to talk to. version string - // custom http headers configured by users. + // userAgent is the User-Agent header to use for HTTP requests. It takes + // precedence over User-Agent headers set in customHTTPHeaders, and other + // header variables. When set to an empty string, the User-Agent header + // is removed, and no header is sent. + userAgent *string + // custom HTTP headers configured by users. customHTTPHeaders map[string]string // manualOverride is set to true when the version was set by users. manualOverride bool @@ -119,22 +132,33 @@ type Client struct { // negotiated indicates that API version negotiation took place negotiated bool + + tp trace.TracerProvider + + // When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections). + // Store the original transport as the http.Client transport will be wrapped with tracing libs. + baseTransport *http.Transport } -// CheckRedirect specifies the policy for dealing with redirect responses: -// If the request is non-GET return ErrRedirect, otherwise use the last response. +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") + +// CheckRedirect specifies the policy for dealing with redirect responses. It +// can be set on [http.Client.CheckRedirect] to prevent HTTP redirects for +// non-GET requests. It returns an [ErrRedirect] for non-GET request, otherwise +// returns a [http.ErrUseLastResponse], which is special-cased by http.Client +// to use the last response. // -// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) -// in the client. The Docker client (and by extension docker API client) can be -// made to send a request like POST /containers//start where what would normally -// be in the name section of the URL is empty. This triggers an HTTP 301 from -// the daemon. +// Go 1.8 changed behavior for HTTP redirects (specifically 301, 307, and 308) +// in the client. The client (and by extension API client) can be made to send +// a request like "POST /containers//start" where what would normally be in the +// name section of the URL is empty. This triggers an HTTP 301 from the daemon. // -// In go 1.8 this 301 will be converted to a GET request, and ends up getting +// In go 1.8 this 301 is converted to a GET request, and ends up getting // a 404 from the daemon. This behavior change manifests in the client in that // before, the 301 was not followed and the client did not generate an error, -// but now results in a message like Error response from daemon: page not found. -func CheckRedirect(req *http.Request, via []*http.Request) error { +// but now results in a message like "Error response from daemon: page not found". +func CheckRedirect(_ *http.Request, via []*http.Request) error { if via[0].Method == http.MethodGet { return http.ErrUseLastResponse } @@ -145,11 +169,11 @@ func CheckRedirect(req *http.Request, via []*http.Request) error { // default API host and version. It also initializes the custom HTTP headers to // add to each request. 
// -// It takes an optional list of Opt functional arguments, which are applied in +// It takes an optional list of [Opt] functional arguments, which are applied in // the order they're provided, which allows modifying the defaults when creating // the client. For example, the following initializes a client that configures -// itself with values from environment variables (client.FromEnv), and has -// automatic API version negotiation enabled (client.WithAPIVersionNegotiation()). +// itself with values from environment variables ([FromEnv]), and has automatic +// API version negotiation enabled ([WithAPIVersionNegotiation]). // // cli, err := client.NewClientWithOpts( // client.FromEnv, @@ -179,23 +203,43 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) { } } + if tr, ok := c.client.Transport.(*http.Transport); ok { + // Store the base transport before we wrap it in tracing libs below + // This is used, as an example, to close idle connections when the client is closed + c.baseTransport = tr + } + if c.scheme == "" { - c.scheme = "http" - - tlsConfig := resolveTLSConfig(c.client.Transport) - if tlsConfig != nil { - // TODO(stevvooe): This isn't really the right way to write clients in Go. - // `NewClient` should probably only take an `*http.Client` and work from there. - // Unfortunately, the model of having a host-ish/url-thingy as the connection - // string has us confusing protocol and transport layers. We continue doing - // this to avoid breaking existing clients but this should be addressed. + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. + if c.tlsConfig() != nil { c.scheme = "https" + } else { + c.scheme = "http" } } + c.client.Transport = otelhttp.NewTransport( + c.client.Transport, + otelhttp.WithTracerProvider(c.tp), + otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string { + return req.Method + " " + req.URL.Path + }), + ) + return c, nil } +func (cli *Client) tlsConfig() *tls.Config { + if cli.baseTransport == nil { + return nil + } + return cli.baseTransport.TLSClientConfig +} + func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) { transport := &http.Transport{} err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) @@ -210,19 +254,28 @@ func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) { // Close the transport used by the client func (cli *Client) Close() error { - if t, ok := cli.client.Transport.(*http.Transport); ok { - t.CloseIdleConnections() + if cli.baseTransport != nil { + cli.baseTransport.CloseIdleConnections() + return nil } return nil } -// getAPIPath returns the versioned request path to call the api. -// It appends the query parameters to the path if they are not empty. -func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { - var apiPath string +// checkVersion manually triggers API version negotiation (if configured). +// This allows for version-dependent code to use the same version as will +// be negotiated when making the actual requests, and for which cases +// we cannot do the negotiation lazily. 
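Aside (illustrative, not part of the vendored diff): the doc comments above describe the functional-option constructor and the lazy API-version negotiation that the new `checkVersion`/`fallbackAPIVersion` plumbing supports. A minimal sketch of how a caller might opt in, assuming the bumped github.com/docker/docker v25 client package:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()

	// FromEnv reads DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH and
	// DOCKER_TLS_VERIFY; WithAPIVersionNegotiation defers negotiation until
	// the first request (or an explicit NegotiateAPIVersion call).
	cli, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithAPIVersionNegotiation(),
	)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Trigger negotiation explicitly; per the hunk above, a ping response
	// without an API version falls back to fallbackAPIVersion ("1.24").
	cli.NegotiateAPIVersion(ctx)
	fmt.Println("negotiated API version:", cli.ClientVersion())
}
```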
+func (cli *Client) checkVersion(ctx context.Context) { if cli.negotiateVersion && !cli.negotiated { cli.NegotiateAPIVersion(ctx) } +} + +// getAPIPath returns the versioned request path to call the API. +// It appends the query parameters to the path if they are not empty. +func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { + var apiPath string + cli.checkVersion(ctx) if cli.version != "" { v := strings.TrimPrefix(cli.version, "v") apiPath = path.Join(cli.basePath, "/v"+v, p) @@ -244,8 +297,8 @@ func (cli *Client) ClientVersion() string { // by the client, it uses the client's maximum version. // // If a manual override is in place, either through the "DOCKER_API_VERSION" -// (EnvOverrideAPIVersion) environment variable, or if the client is initialized -// with a fixed version (WithVersion(xx)), no negotiation is performed. +// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized +// with a fixed version ([WithVersion]), no negotiation is performed. // // If the API server's ping response does not contain an API version, or if the // client did not get a successful ping response, it assumes it is connected with @@ -265,8 +318,8 @@ func (cli *Client) NegotiateAPIVersion(ctx context.Context) { // version. // // If a manual override is in place, either through the "DOCKER_API_VERSION" -// (EnvOverrideAPIVersion) environment variable, or if the client is initialized -// with a fixed version (WithVersion(xx)), no negotiation is performed. +// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized +// with a fixed version ([WithVersion]), no negotiation is performed. // // If the API server's ping response does not contain an API version, we assume // we are connected with an old daemon without API version negotiation support, @@ -283,7 +336,7 @@ func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { // default to the latest version before versioning headers existed if pingResponse.APIVersion == "" { - pingResponse.APIVersion = "1.24" + pingResponse.APIVersion = fallbackAPIVersion } // if the client is not initialized with a version, start with the latest supported version @@ -338,17 +391,40 @@ func ParseHostURL(host string) (*url.URL, error) { }, nil } +func (cli *Client) dialerFromTransport() func(context.Context, string, string) (net.Conn, error) { + if cli.baseTransport == nil || cli.baseTransport.DialContext == nil { + return nil + } + + if cli.baseTransport.TLSClientConfig != nil { + // When using a tls config we don't use the configured dialer but instead a fallback dialer... + // Note: It seems like this should use the normal dialer and wrap the returned net.Conn in a tls.Conn + // I honestly don't know why it doesn't do that, but it doesn't and such a change is entirely unrelated to the change in this commit. + return nil + } + return cli.baseTransport.DialContext +} + // Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header, -// that can be used for proxying the daemon connection. +// that can be used for proxying the daemon connection. It is used by +// ["docker dial-stdio"]. // -// Used by `docker dial-stdio` (docker/cli#889). 
+// ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014 func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { return func(ctx context.Context) (net.Conn, error) { - if transport, ok := cli.client.Transport.(*http.Transport); ok { - if transport.DialContext != nil && transport.TLSClientConfig == nil { - return transport.DialContext(ctx, cli.proto, cli.addr) + if dialFn := cli.dialerFromTransport(); dialFn != nil { + return dialFn(ctx, cli.proto, cli.addr) + } + switch cli.proto { + case "unix": + return net.Dial(cli.proto, cli.addr) + case "npipe": + return sockets.DialPipe(cli.addr, 32*time.Second) + default: + if tlsConfig := cli.tlsConfig(); tlsConfig != nil { + return tls.Dial(cli.proto, cli.addr, tlsConfig) } + return net.Dial(cli.proto, cli.addr) } - return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) } } diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go index 319b738d3e..9fe78ea43a 100644 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package client // import "github.com/docker/docker/client" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go index f6b1881fc3..3deb4a8e2a 100644 --- a/vendor/github.com/docker/docker/client/config_create.go +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -11,7 +11,7 @@ import ( // ConfigCreate creates a new config. func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { var response types.ConfigCreateResponse - if err := cli.NewVersionError("1.30", "config create"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config create"); err != nil { return response, err } resp, err := cli.post(ctx, "/configs/create", nil, config, nil) diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go index 9be7882c3d..2c6c7cb36f 100644 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -14,7 +14,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C if id == "" { return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} } - if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config inspect"); err != nil { return swarm.Config{}, nil, err } resp, err := cli.get(ctx, "/configs/"+id, nil, nil) diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go index 565acc6e27..14dd3813e3 100644 --- a/vendor/github.com/docker/docker/client/config_list.go +++ b/vendor/github.com/docker/docker/client/config_list.go @@ -12,7 +12,7 @@ import ( // ConfigList returns the list of configs. 
func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { - if err := cli.NewVersionError("1.30", "config list"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config list"); err != nil { return nil, err } query := url.Values{} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go index 24b94e9c18..d05b0113aa 100644 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -4,7 +4,7 @@ import "context" // ConfigRemove removes a config. func (cli *Client) ConfigRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.30", "config remove"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config remove"); err != nil { return err } resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go index 1ac2985435..6995861df0 100644 --- a/vendor/github.com/docker/docker/client/config_update.go +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -9,7 +9,7 @@ import ( // ConfigUpdate attempts to update a config func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { - if err := cli.NewVersionError("1.30", "config update"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config update"); err != nil { return err } query := url.Values{} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go index ba92117d3e..6a32e5f664 100644 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -2,9 +2,11 @@ package client // import "github.com/docker/docker/client" import ( "context" + "net/http" "net/url" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerAttach attaches a connection to a container in the server. @@ -31,7 +33,7 @@ import ( // // You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this // stream. 
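Aside (illustrative, not part of the vendored diff): the comment above points at github.com/docker/docker/pkg/stdcopy for demultiplexing a non-TTY attach stream. A hedged sketch using the new `container.AttachOptions` type introduced by this hunk; the container name is a placeholder:

```go
package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// "my-container" is a placeholder container name or ID.
	resp, err := cli.ContainerAttach(ctx, "my-container", container.AttachOptions{
		Stream: true,
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Close()

	// Without a TTY, stdout and stderr share one multiplexed stream;
	// StdCopy splits them back onto separate writers.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil {
		panic(err)
	}
}
```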
-func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { +func (cli *Client) ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error) { query := url.Values{} if options.Stream { query.Set("stream", "1") @@ -52,8 +54,7 @@ func (cli *Client) ContainerAttach(ctx context.Context, container string, option query.Set("logs", "1") } - headers := map[string][]string{ + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, http.Header{ "Content-Type": {"text/plain"}, - } - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) + }) } diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go index cd7f763464..26b3f09158 100644 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -6,12 +6,13 @@ import ( "errors" "net/url" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerCommit applies changes to a container and creates a new tagged image. -func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { +func (cli *Client) ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) { var repository, tag string if options.Reference != "" { ref, err := reference.ParseNormalizedNamed(options.Reference) diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index 193a2bb562..409f5b492a 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -23,10 +23,23 @@ type configWrapper struct { func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) { var response container.CreateResponse - if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. 
+ cli.checkVersion(ctx) + + if err := cli.NewVersionError(ctx, "1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + if err := cli.NewVersionError(ctx, "1.41", "specify container image platform"); platform != nil && err != nil { return response, err } - if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil { + if err := cli.NewVersionError(ctx, "1.44", "specify health-check start interval"); config != nil && config.Healthcheck != nil && config.Healthcheck.StartInterval != 0 && err != nil { + return response, err + } + if err := cli.NewVersionError(ctx, "1.44", "specify mac-address per network"); hasEndpointSpecificMacAddress(networkingConfig) && err != nil { return response, err } @@ -45,6 +58,11 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config } } + // Since API 1.44, the container-wide MacAddress is deprecated and will trigger a WARNING if it's specified. + if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.44") { + config.MacAddress = "" //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44. + } + query := url.Values{} if p := formatPlatform(platform); p != "" { query.Set("platform", p) @@ -81,3 +99,16 @@ func formatPlatform(platform *ocispec.Platform) string { } return path.Join(platform.OS, platform.Architecture, platform.Variant) } + +// hasEndpointSpecificMacAddress checks whether one of the endpoint in networkingConfig has a MacAddress defined. +func hasEndpointSpecificMacAddress(networkingConfig *network.NetworkingConfig) bool { + if networkingConfig == nil { + return false + } + for _, endpoint := range networkingConfig.EndpointsConfig { + if endpoint.MacAddress != "" { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go index 6a2cb006f8..3fff0c8288 100644 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -3,6 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "net/http" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" @@ -12,7 +13,14 @@ import ( func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { var response types.IDResponse - if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. 
+ cli.checkVersion(ctx) + + if err := cli.NewVersionError(ctx, "1.25", "env"); len(config.Env) != 0 && err != nil { return response, err } if versions.LessThan(cli.ClientVersion(), "1.42") { @@ -46,10 +54,9 @@ func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, confi if versions.LessThan(cli.ClientVersion(), "1.42") { config.ConsoleSize = nil } - headers := map[string][]string{ + return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, http.Header{ "Content-Type": {"application/json"}, - } - return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) + }) } // ContainerExecInspect returns information about a specific exec process on the docker host. diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index bd491b3db9..782e1b3c62 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -7,11 +7,12 @@ import ( "strconv" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" ) // ContainerList returns the list of containers in the docker host. -func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { +func (cli *Client) ContainerList(ctx context.Context, options container.ListOptions) ([]types.Container, error) { query := url.Values{} if options.All { @@ -37,7 +38,6 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis if options.Filters.Len() > 0 { //nolint:staticcheck // ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go index 9bdf2b0fa6..61197d8407 100644 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -6,7 +6,7 @@ import ( "net/url" "time" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" timetypes "github.com/docker/docker/api/types/time" "github.com/pkg/errors" ) @@ -33,7 +33,7 @@ import ( // // You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this // stream. 
-func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { +func (cli *Client) ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go index 04383deaaf..ca50923844 100644 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -13,7 +13,7 @@ import ( func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { var report types.ContainersPruneReport - if err := cli.NewVersionError("1.25", "container prune"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "container prune"); err != nil { return report, err } diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go index c21de609b0..39f7b106a1 100644 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -4,11 +4,11 @@ import ( "context" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerRemove kills and removes a container from the docker host. -func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { +func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options container.RemoveOptions) error { query := url.Values{} if options.RemoveVolumes { query.Set("v", "1") diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go index a9d4c0c79a..5cfd01d479 100644 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -5,16 +5,16 @@ import ( "net/url" "strconv" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerResize changes the size of the tty for a container. -func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options container.ResizeOptions) error { return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) } // ContainerExecResize changes the size of the tty for an exec process running inside a container. 
-func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { +func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error { return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) } diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go index 1e0ad99981..825d3e4e9d 100644 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -17,8 +17,16 @@ func (cli *Client) ContainerRestart(ctx context.Context, containerID string, opt if options.Timeout != nil { query.Set("t", strconv.Itoa(*options.Timeout)) } - if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("signal", options.Signal) + if options.Signal != "" { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + if versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) + } } resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go index c2e0b15dca..33ba85f248 100644 --- a/vendor/github.com/docker/docker/client/container_start.go +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -4,11 +4,11 @@ import ( "context" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerStart sends a request to the docker daemon to start a container. -func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { +func (cli *Client) ContainerStart(ctx context.Context, containerID string, options container.StartOptions) error { query := url.Values{} if len(options.CheckpointID) != 0 { query.Set("checkpoint", options.CheckpointID) diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go index 0a6488dde8..3fabb75f32 100644 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -21,8 +21,10 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea return types.ContainerStats{}, err } - osType := getDockerOS(resp.header.Get("Server")) - return types.ContainerStats{Body: resp.body, OSType: osType}, err + return types.ContainerStats{ + Body: resp.body, + OSType: getDockerOS(resp.header.Get("Server")), + }, nil } // ContainerStatsOneShot gets a single stat entry from a container. 
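Aside (illustrative, not part of the vendored diff): the hunks above restructure how `ContainerStats` and `ContainerStatsOneShot` assemble their return value. A minimal usage sketch for the one-shot variant, assuming a daemon supporting API 1.41+; the container name is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"os"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// One-shot mode asks the daemon for a single stats sample instead of a stream.
	stats, err := cli.ContainerStatsOneShot(ctx, "my-container")
	if err != nil {
		panic(err)
	}
	defer stats.Body.Close()

	// OSType comes from the Server header, mirroring the refactor above;
	// the body is a single JSON document.
	fmt.Println("daemon OS:", stats.OSType)
	if _, err := io.Copy(os.Stdout, stats.Body); err != nil {
		panic(err)
	}
}
```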
@@ -37,6 +39,8 @@ func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string return types.ContainerStats{}, err } - osType := getDockerOS(resp.header.Get("Server")) - return types.ContainerStats{Body: resp.body, OSType: osType}, err + return types.ContainerStats{ + Body: resp.body, + OSType: getDockerOS(resp.header.Get("Server")), + }, nil } diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go index 2a43ce2274..ac0cab69de 100644 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -21,8 +21,16 @@ func (cli *Client) ContainerStop(ctx context.Context, containerID string, option if options.Timeout != nil { query.Set("t", strconv.Itoa(*options.Timeout)) } - if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("signal", options.Signal) + if options.Signal != "" { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + if versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) + } } resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index 2375eb1e80..b8d3bdef0d 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -30,6 +30,12 @@ const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ // synchronize ContainerWait with other calls, such as specifying a // "next-exit" condition before issuing a ContainerStart request. func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) if versions.LessThan(cli.ClientVersion(), "1.30") { return cli.legacyContainerWait(ctx, containerID) } @@ -66,8 +72,12 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit // // If there's a JSON parsing error, read the real error message // off the body and send it to the client. 
- _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) - errC <- errors.New(responseText.String()) + if errors.As(err, new(*json.SyntaxError)) { + _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) + errC <- errors.New(responseText.String()) + } else { + errC <- err + } return } diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go index efab066d3b..68ef31b78b 100644 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -3,6 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "net/http" "net/url" "github.com/docker/docker/api/types/registry" @@ -16,13 +17,13 @@ func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegist return distributionInspect, objectNotFoundError{object: "distribution", id: image} } - if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "distribution inspect"); err != nil { return distributionInspect, err } - var headers map[string][]string + var headers http.Header if encodedRegistryAuth != "" { - headers = map[string][]string{ + headers = http.Header{ registry.AuthHeader: {encodedRegistryAuth}, } } diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index 6878144c41..4b96b02085 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -1,6 +1,7 @@ package client // import "github.com/docker/docker/client" import ( + "context" "fmt" "github.com/docker/docker/api/types/versions" @@ -31,20 +32,10 @@ func ErrorConnectionFailed(host string) error { return errConnectionFailed{host: host} } -// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility -type notFound interface { - error - NotFound() bool -} - // IsErrNotFound returns true if the error is a NotFound error, which is returned -// by the API when some object is not found. +// by the API when some object is not found. It is an alias for [errdefs.IsNotFound]. func IsErrNotFound(err error) bool { - if errdefs.IsNotFound(err) { - return true - } - var e notFound - return errors.As(err, &e) + return errdefs.IsNotFound(err) } type objectNotFoundError struct { @@ -58,9 +49,18 @@ func (e objectNotFoundError) Error() string { return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) } -// NewVersionError returns an error if the APIVersion required -// if less than the current supported version -func (cli *Client) NewVersionError(APIrequired, feature string) error { +// NewVersionError returns an error if the APIVersion required is less than the +// current supported version. +// +// It performs API-version negotiation if the Client is configured with this +// option, otherwise it assumes the latest API version is used. +func (cli *Client) NewVersionError(ctx context.Context, APIrequired, feature string) error { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. 
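Aside (illustrative, not part of the vendored diff): the errors.go hunk above reduces `IsErrNotFound` to an alias for `errdefs.IsNotFound`. A hedged sketch of the typical call-site pattern, with a deliberately nonexistent container name as the placeholder:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// "no-such-container" is a placeholder that presumably does not exist.
	if _, err := cli.ContainerInspect(ctx, "no-such-container"); err != nil {
		// IsErrNotFound is now a thin wrapper over errdefs.IsNotFound.
		if client.IsErrNotFound(err) {
			fmt.Println("container not found")
			return
		}
		panic(err)
	}
}
```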
+ cli.checkVersion(ctx) if cli.version != "" && versions.LessThan(cli.version, APIrequired) { return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) } diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 7e84865f69..839d4c5cd6 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -3,18 +3,16 @@ package client // import "github.com/docker/docker/client" import ( "bufio" "context" - "crypto/tls" "fmt" "net" "net/http" - "net/http/httputil" "net/url" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" - "github.com/docker/go-connections/sockets" "github.com/pkg/errors" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" ) // postHijacked sends a POST request and hijacks the connection. @@ -23,11 +21,11 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu if err != nil { return types.HijackedResponse{}, err } - req, err := cli.buildRequest(http.MethodPost, cli.getAPIPath(ctx, path, query), bodyEncoded, headers) + req, err := cli.buildRequest(ctx, http.MethodPost, cli.getAPIPath(ctx, path, query), bodyEncoded, headers) if err != nil { return types.HijackedResponse{}, err } - conn, mediaType, err := cli.setupHijackConn(ctx, req, "tcp") + conn, mediaType, err := cli.setupHijackConn(req, "tcp") if err != nil { return types.HijackedResponse{}, err } @@ -37,29 +35,18 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu // DialHijack returns a hijacked connection with negotiated protocol proto. func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { - req, err := http.NewRequest(http.MethodPost, url, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) if err != nil { return nil, err } req = cli.addHeaders(req, meta) - conn, _, err := cli.setupHijackConn(ctx, req, proto) + conn, _, err := cli.setupHijackConn(req, proto) return conn, err } -// fallbackDial is used when WithDialer() was not called. -// See cli.Dialer(). -func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { - if tlsConfig != nil && proto != "unix" && proto != "npipe" { - return tls.Dial(proto, addr, tlsConfig) - } - if proto == "npipe" { - return sockets.DialPipe(addr, 32*time.Second) - } - return net.Dial(proto, addr) -} - -func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, string, error) { +func (cli *Client) setupHijackConn(req *http.Request, proto string) (_ net.Conn, _ string, retErr error) { + ctx := req.Context() req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", proto) @@ -68,6 +55,11 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto if err != nil { return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. 
Is 'docker daemon' running on this host?") } + defer func() { + if retErr != nil { + conn.Close() + } + }() // When we set up a TCP connection for hijack, there could be long periods // of inactivity (a long running command with no output) that in certain @@ -79,35 +71,29 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto _ = tcpConn.SetKeepAlivePeriod(30 * time.Second) } - clientconn := httputil.NewClientConn(conn, nil) - defer clientconn.Close() + hc := &hijackedConn{conn, bufio.NewReader(conn)} // Server hijacks the connection, error 'connection closed' expected - resp, err := clientconn.Do(req) - - //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons - if err != httputil.ErrPersistEOF { - if err != nil { - return nil, "", err - } - if resp.StatusCode != http.StatusSwitchingProtocols { - _ = resp.Body.Close() - return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) - } + resp, err := otelhttp.NewTransport(hc).RoundTrip(req) + if err != nil { + return nil, "", err + } + if resp.StatusCode != http.StatusSwitchingProtocols { + _ = resp.Body.Close() + return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) } - c, br := clientconn.Hijack() - if br.Buffered() > 0 { + if hc.r.Buffered() > 0 { // If there is buffered content, wrap the connection. We return an // object that implements CloseWrite if the underlying connection // implements it. - if _, ok := c.(types.CloseWriter); ok { - c = &hijackedConnCloseWriter{&hijackedConn{c, br}} + if _, ok := hc.Conn.(types.CloseWriter); ok { + conn = &hijackedConnCloseWriter{hc} } else { - c = &hijackedConn{c, br} + conn = hc } } else { - br.Reset(nil) + hc.r.Reset(nil) } var mediaType string @@ -116,7 +102,7 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto mediaType = resp.Header.Get("Content-Type") } - return c, mediaType, nil + return conn, mediaType, nil } // hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case @@ -128,6 +114,13 @@ type hijackedConn struct { r *bufio.Reader } +func (c *hijackedConn) RoundTrip(req *http.Request) (*http.Response, error) { + if err := req.Write(c.Conn); err != nil { + return nil, err + } + return http.ReadResponse(c.r, req) +} + func (c *hijackedConn) Read(b []byte) (int, error) { return c.r.Read(b) } diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index d16e1d8ea9..d294ddc8b2 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -18,18 +18,18 @@ import ( // The Body in the response implements an io.ReadCloser and it's up to the caller to // close it. 
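A minimal caller-side sketch of the build call described above, mainly to show that the response Body must be closed by the caller; the tarball path and image tag are illustrative, and it assumes a reachable daemon:

    package main

    import (
        "context"
        "io"
        "os"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        // The build context must be a tar stream; a pre-built tarball is used here
        // purely for illustration.
        buildContext, err := os.Open("context.tar")
        if err != nil {
            panic(err)
        }
        defer buildContext.Close()

        resp, err := cli.ImageBuild(context.Background(), buildContext, types.ImageBuildOptions{
            Tags: []string{"example:latest"},
        })
        if err != nil {
            panic(err)
        }
        // The Body is an io.ReadCloser; closing it is the caller's responsibility.
        defer resp.Body.Close()
        _, _ = io.Copy(os.Stdout, resp.Body)
    }
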
func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { - query, err := cli.imageBuildOptionsToQuery(options) + query, err := cli.imageBuildOptionsToQuery(ctx, options) if err != nil { return types.ImageBuildResponse{}, err } - headers := http.Header(make(map[string][]string)) buf, err := json.Marshal(options.AuthConfigs) if err != nil { return types.ImageBuildResponse{}, err } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + headers := http.Header{} + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) headers.Set("Content-Type", "application/x-tar") serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) @@ -37,15 +37,13 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio return types.ImageBuildResponse{}, err } - osType := getDockerOS(serverResp.header.Get("Server")) - return types.ImageBuildResponse{ Body: serverResp.body, - OSType: osType, + OSType: getDockerOS(serverResp.header.Get("Server")), }, nil } -func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { +func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.ImageBuildOptions) (url.Values, error) { query := url.Values{ "t": options.Tags, "securityopt": options.SecurityOpt, @@ -75,7 +73,7 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur } if options.Squash { - if err := cli.NewVersionError("1.25", "squash"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "squash"); err != nil { return query, err } query.Set("squash", "1") @@ -125,7 +123,7 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur query.Set("session", options.SessionID) } if options.Platform != "" { - if err := cli.NewVersionError("1.32", "platform"); err != nil { + if err := cli.NewVersionError(ctx, "1.32", "platform"); err != nil { return query, err } query.Set("platform", strings.ToLower(options.Platform)) diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go index 6a9b708f7d..29cd0b4373 100644 --- a/vendor/github.com/docker/docker/client/image_create.go +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -3,10 +3,11 @@ package client // import "github.com/docker/docker/client" import ( "context" "io" + "net/http" "net/url" "strings" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" ) @@ -33,6 +34,7 @@ func (cli *Client) ImageCreate(ctx context.Context, parentReference string, opti } func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.post(ctx, "/images/create", query, nil, headers) + return cli.post(ctx, "/images/create", query, nil, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index c5de42cb79..cd376a14e5 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -6,7 +6,7 @@ import ( "net/url" "strings" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" 
"github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index 950d513334..f3f2280e32 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -7,12 +7,20 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/versions" ) // ImageList returns a list of images in the docker host. -func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { - var images []types.ImageSummary +func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error) { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + + var images []image.Summary query := url.Values{} optionFilters := options.Filters diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go index 91016e493c..c825206ea5 100644 --- a/vendor/github.com/docker/docker/client/image_load.go +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -3,6 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" "io" + "net/http" "net/url" "github.com/docker/docker/api/types" @@ -17,8 +18,9 @@ func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) ( if quiet { v.Set("quiet", "1") } - headers := map[string][]string{"Content-Type": {"application/x-tar"}} - resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + resp, err := cli.postRaw(ctx, "/images/load", v, input, http.Header{ + "Content-Type": {"application/x-tar"}, + }) if err != nil { return types.ImageLoadResponse{}, err } diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go index 56af6d7f98..6b82d6ab6c 100644 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -13,7 +13,7 @@ import ( func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { var report types.ImagesPruneReport - if err := cli.NewVersionError("1.25", "image prune"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "image prune"); err != nil { return report, err } diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go index a23975591b..d92049d588 100644 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -6,7 +6,7 @@ import ( "net/url" "strings" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/errdefs" ) diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index dd1b8f3471..6839a89e07 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -4,9 +4,10 @@ import ( "context" "errors" "io" + "net/http" "net/url" - "github.com/docker/distribution/reference" 
+ "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" @@ -50,6 +51,7 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options types.Im } func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) + return cli.post(ctx, "/images/"+imageID+"/push", query, nil, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go index 6a9fb3f41f..b936d20830 100644 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -6,10 +6,11 @@ import ( "net/url" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" ) // ImageRemove removes an image from the docker host. -func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]image.DeleteResponse, error) { query := url.Values{} if options.Force { @@ -19,7 +20,7 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type query.Set("noprune", "1") } - var dels []types.ImageDeleteResponseItem + var dels []image.DeleteResponse resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) defer ensureReaderClosed(resp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index 5f0c49ed30..8971b139ae 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -3,6 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "net/http" "net/url" "strconv" @@ -48,6 +49,7 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I } func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.get(ctx, "/images/search", query, headers) + return cli.get(ctx, "/images/search", query, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go index 5652bfc252..ea6b4a1e65 100644 --- a/vendor/github.com/docker/docker/client/image_tag.go +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -4,7 +4,7 @@ import ( "context" "net/url" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/pkg/errors" ) diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go index c856704e23..cc3fcc4670 100644 --- a/vendor/github.com/docker/docker/client/info.go +++ b/vendor/github.com/docker/docker/client/info.go @@ -6,12 +6,12 @@ import ( "fmt" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/system" ) // Info returns information about the docker server. 
-func (cli *Client) Info(ctx context.Context) (types.Info, error) { - var info types.Info +func (cli *Client) Info(ctx context.Context) (system.Info, error) { + var info system.Info serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) defer ensureReaderClosed(serverResp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 7993c5a48f..302f5fb13e 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/system" "github.com/docker/docker/api/types/volume" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -45,30 +46,30 @@ type CommonAPIClient interface { // ContainerAPIClient defines API client methods for the containers type ContainerAPIClient interface { - ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) - ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) - ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerList(ctx context.Context, options container.ListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) ContainerPause(ctx context.Context, container string) error - ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRemove(ctx context.Context, container string, options container.RemoveOptions) error ContainerRename(ctx context.Context, container, newContainerName string) error - 
ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerResize(ctx context.Context, container string, options container.ResizeOptions) error ContainerRestart(ctx context.Context, container string, options container.StopOptions) error ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) - ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStart(ctx context.Context, container string, options container.StartOptions) error ContainerStop(ctx context.Context, container string, options container.StopOptions) error ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) ContainerUnpause(ctx context.Context, container string) error @@ -93,11 +94,11 @@ type ImageAPIClient interface { ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]image.DeleteResponse, error) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) ImageTag(ctx context.Context, image, ref string) error @@ -140,13 +141,13 @@ type PluginAPIClient interface { // ServiceAPIClient defines API client methods for the services type ServiceAPIClient interface { - ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) ServiceRemove(ctx context.Context, serviceID string) error - ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) - ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) - TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options 
types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) + TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) } @@ -165,7 +166,7 @@ type SwarmAPIClient interface { // SystemAPIClient defines API client methods for the system type SystemAPIClient interface { Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) - Info(ctx context.Context) (types.Info, error) + Info(ctx context.Context) (system.Info, error) RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) Ping(ctx context.Context) (types.Ping, error) diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go index 402ffb512c..c585c10459 100644 --- a/vendor/github.com/docker/docker/client/interface_experimental.go +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -3,7 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/checkpoint" ) type apiClientExperimental interface { @@ -12,7 +12,7 @@ type apiClientExperimental interface { // CheckpointAPIClient defines API client methods for the checkpoints type CheckpointAPIClient interface { - CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error - CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error - CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) + CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error + CheckpointDelete(ctx context.Context, container string, options checkpoint.DeleteOptions) error + CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) } diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go index 278d9383a8..668e87d653 100644 --- a/vendor/github.com/docker/docker/client/network_create.go +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -5,14 +5,26 @@ import ( "encoding/json" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" ) // NetworkCreate creates a new network in the docker host. func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + networkCreateRequest := types.NetworkCreateRequest{ NetworkCreate: options, Name: name, } + if versions.LessThan(cli.version, "1.44") { + networkCreateRequest.CheckDuplicate = true //nolint:staticcheck // ignore SA1019: CheckDuplicate is deprecated since API v1.44. 
+ } + var response types.NetworkCreateResponse serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go index cebb188219..7b5f831ef7 100644 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -13,7 +13,7 @@ import ( func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { var report types.NetworksPruneReport - if err := cli.NewVersionError("1.25", "network prune"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "network prune"); err != nil { return report, err } diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go index c212906bc7..1a9e6bfb1b 100644 --- a/vendor/github.com/docker/docker/client/node_list.go +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -16,7 +16,6 @@ func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) if options.Filters.Len() > 0 { filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go index 099ad41846..ddb0ca3991 100644 --- a/vendor/github.com/docker/docker/client/options.go +++ b/vendor/github.com/docker/docker/client/options.go @@ -11,25 +11,25 @@ import ( "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" + "go.opentelemetry.io/otel/trace" ) -// Opt is a configuration option to initialize a client +// Opt is a configuration option to initialize a [Client]. type Opt func(*Client) error -// FromEnv configures the client with values from environment variables. +// FromEnv configures the client with values from environment variables. It +// is the equivalent of using the [WithTLSClientConfigFromEnv], [WithHostFromEnv], +// and [WithVersionFromEnv] options. // // FromEnv uses the following environment variables: // -// DOCKER_HOST (EnvOverrideHost) to set the URL to the docker server. -// -// DOCKER_API_VERSION (EnvOverrideAPIVersion) to set the version of the API to -// use, leave empty for latest. -// -// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to -// load the TLS certificates (ca.pem, cert.pem, key.pem). -// -// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by -// default). +// - DOCKER_HOST ([EnvOverrideHost]) to set the URL to the docker server. +// - DOCKER_API_VERSION ([EnvOverrideAPIVersion]) to set the version of the +// API to use, leave empty for latest. +// - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from +// which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem'). +// - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification +// (off by default). func FromEnv(c *Client) error { ops := []Opt{ WithTLSClientConfigFromEnv(), @@ -45,7 +45,8 @@ func FromEnv(c *Client) error { } // WithDialContext applies the dialer to the client transport. This can be -// used to set the Timeout and KeepAlive settings of the client. +// used to set the Timeout and KeepAlive settings of the client. It returns +// an error if the client does not have a [http.Transport] configured. 
func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { return func(c *Client) error { if transport, ok := c.client.Transport.(*http.Transport); ok { @@ -75,7 +76,7 @@ func WithHost(host string) Opt { } // WithHostFromEnv overrides the client host with the host specified in the -// DOCKER_HOST (EnvOverrideHost) environment variable. If DOCKER_HOST is not set, +// DOCKER_HOST ([EnvOverrideHost]) environment variable. If DOCKER_HOST is not set, // or set to an empty value, the host is not modified. func WithHostFromEnv() Opt { return func(c *Client) error { @@ -86,7 +87,7 @@ func WithHostFromEnv() Opt { } } -// WithHTTPClient overrides the client http client with the specified one +// WithHTTPClient overrides the client's HTTP client with the specified one. func WithHTTPClient(client *http.Client) Opt { return func(c *Client) error { if client != nil { @@ -96,7 +97,7 @@ func WithHTTPClient(client *http.Client) Opt { } } -// WithTimeout configures the time limit for requests made by the HTTP client +// WithTimeout configures the time limit for requests made by the HTTP client. func WithTimeout(timeout time.Duration) Opt { return func(c *Client) error { c.client.Timeout = timeout @@ -104,7 +105,19 @@ func WithTimeout(timeout time.Duration) Opt { } } -// WithHTTPHeaders overrides the client default http headers +// WithUserAgent configures the User-Agent header to use for HTTP requests. +// It overrides any User-Agent set in headers. When set to an empty string, +// the User-Agent header is removed, and no header is sent. +func WithUserAgent(ua string) Opt { + return func(c *Client) error { + c.userAgent = &ua + return nil + } +} + +// WithHTTPHeaders appends custom HTTP headers to the client's default headers. +// It does not allow for built-in headers (such as "User-Agent", if set) to +// be overridden. Also see [WithUserAgent]. func WithHTTPHeaders(headers map[string]string) Opt { return func(c *Client) error { c.customHTTPHeaders = headers @@ -112,7 +125,7 @@ func WithHTTPHeaders(headers map[string]string) Opt { } } -// WithScheme overrides the client scheme with the specified one +// WithScheme overrides the client scheme with the specified one. func WithScheme(scheme string) Opt { return func(c *Client) error { c.scheme = scheme @@ -120,51 +133,50 @@ func WithScheme(scheme string) Opt { } } -// WithTLSClientConfig applies a tls config to the client transport. +// WithTLSClientConfig applies a TLS config to the client transport. func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { return func(c *Client) error { - opts := tlsconfig.Options{ + transport, ok := c.client.Transport.(*http.Transport) + if !ok { + return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + } + config, err := tlsconfig.Client(tlsconfig.Options{ CAFile: cacertPath, CertFile: certPath, KeyFile: keyPath, ExclusiveRootPools: true, - } - config, err := tlsconfig.Client(opts) + }) if err != nil { return errors.Wrap(err, "failed to create tls config") } - if transport, ok := c.client.Transport.(*http.Transport); ok { - transport.TLSClientConfig = config - return nil - } - return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + transport.TLSClientConfig = config + return nil } } // WithTLSClientConfigFromEnv configures the client's TLS settings with the -// settings in the DOCKER_CERT_PATH and DOCKER_TLS_VERIFY environment variables. 
-// If DOCKER_CERT_PATH is not set or empty, TLS configuration is not modified. +// settings in the DOCKER_CERT_PATH ([EnvOverrideCertPath]) and DOCKER_TLS_VERIFY +// ([EnvTLSVerify]) environment variables. If DOCKER_CERT_PATH is not set or empty, +// TLS configuration is not modified. // // WithTLSClientConfigFromEnv uses the following environment variables: // -// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to -// load the TLS certificates (ca.pem, cert.pem, key.pem). -// -// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by -// default). +// - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from +// which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem"). +// - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification +// (off by default). func WithTLSClientConfigFromEnv() Opt { return func(c *Client) error { dockerCertPath := os.Getenv(EnvOverrideCertPath) if dockerCertPath == "" { return nil } - options := tlsconfig.Options{ + tlsc, err := tlsconfig.Client(tlsconfig.Options{ CAFile: filepath.Join(dockerCertPath, "ca.pem"), CertFile: filepath.Join(dockerCertPath, "cert.pem"), KeyFile: filepath.Join(dockerCertPath, "key.pem"), InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "", - } - tlsc, err := tlsconfig.Client(options) + }) if err != nil { return err } @@ -178,7 +190,8 @@ func WithTLSClientConfigFromEnv() Opt { } // WithVersion overrides the client version with the specified one. If an empty -// version is specified, the value will be ignored to allow version negotiation. +// version is provided, the value is ignored to allow version negotiation +// (see [WithAPIVersionNegotiation]). func WithVersion(version string) Opt { return func(c *Client) error { if version != "" { @@ -190,8 +203,9 @@ func WithVersion(version string) Opt { } // WithVersionFromEnv overrides the client version with the version specified in -// the DOCKER_API_VERSION environment variable. If DOCKER_API_VERSION is not set, -// the version is not modified. +// the DOCKER_API_VERSION ([EnvOverrideAPIVersion]) environment variable. +// If DOCKER_API_VERSION is not set, or set to an empty value, the version +// is not modified. func WithVersionFromEnv() Opt { return func(c *Client) error { return WithVersion(os.Getenv(EnvOverrideAPIVersion))(c) @@ -201,10 +215,19 @@ func WithVersionFromEnv() Opt { // WithAPIVersionNegotiation enables automatic API version negotiation for the client. // With this option enabled, the client automatically negotiates the API version // to use when making requests. API version negotiation is performed on the first -// request; subsequent requests will not re-negotiate. +// request; subsequent requests do not re-negotiate. func WithAPIVersionNegotiation() Opt { return func(c *Client) error { c.negotiateVersion = true return nil } } + +// WithTraceProvider sets the trace provider for the client. +// If this is not set then the global trace provider will be used. 
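A sketch of wiring the new option into client construction; otel.GetTracerProvider (from go.opentelemetry.io/otel) stands in for whatever TracerProvider the application installs and is not part of this patch:

    package main

    import (
        "go.opentelemetry.io/otel"

        "github.com/docker/docker/client"
    )

    func main() {
        // Passing the global provider explicitly is equivalent to the default
        // behaviour described above; a real application would usually install
        // its own TracerProvider first.
        cli, err := client.NewClientWithOpts(
            client.FromEnv,
            client.WithAPIVersionNegotiation(),
            client.WithTraceProvider(otel.GetTracerProvider()),
        )
        if err != nil {
            panic(err)
        }
        defer cli.Close()
    }
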
+func WithTraceProvider(provider trace.TracerProvider) Opt { + return func(c *Client) error { + c.tp = provider + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index 347ae71e02..dfd1042fab 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -21,11 +21,11 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() // because ping requests are used during API version negotiation, so we want // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping - req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) + req, err := cli.buildRequest(ctx, http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } - serverResp, err := cli.doRequest(ctx, req) + serverResp, err := cli.doRequest(req) if err == nil { defer ensureReaderClosed(serverResp) switch serverResp.statusCode { @@ -37,11 +37,9 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { return ping, err } - req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) - if err != nil { - return ping, err - } - serverResp, err = cli.doRequest(ctx, req) + // HEAD failed; fallback to GET. + req.Method = http.MethodGet + serverResp, err = cli.doRequest(req) defer ensureReaderClosed(serverResp) if err != nil { return ping, err diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go index 3a740ec4f6..69184619a2 100644 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -4,9 +4,10 @@ import ( "context" "encoding/json" "io" + "net/http" "net/url" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" @@ -68,13 +69,15 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types } func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.get(ctx, "/plugins/privileges", query, headers) + return cli.get(ctx, "/plugins/privileges", query, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.post(ctx, "/plugins/pull", query, privileges, headers) + return cli.post(ctx, "/plugins/pull", query, privileges, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go index 18f9754c4c..8f68a86eee 100644 --- a/vendor/github.com/docker/docker/client/plugin_push.go +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -3,14 +3,16 @@ package client // import "github.com/docker/docker/client" import ( "context" "io" + "net/http" 
"github.com/docker/docker/api/types/registry" ) // PluginPush pushes a plugin to a registry func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, http.Header{ + registry.AuthHeader: {registryAuth}, + }) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go index 995d1fd2ca..5cade450f4 100644 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -3,9 +3,10 @@ package client // import "github.com/docker/docker/client" import ( "context" "io" + "net/http" "net/url" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/pkg/errors" @@ -13,7 +14,7 @@ import ( // PluginUpgrade upgrades a plugin func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { - if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { + if err := cli.NewVersionError(ctx, "1.26", "plugin upgrade"); err != nil { return nil, err } query := url.Values{} @@ -35,6 +36,7 @@ func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types } func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index bcedcf3bd9..efe07bb9ea 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -10,6 +10,7 @@ import ( "net/http" "net/url" "os" + "reflect" "strings" "github.com/docker/docker/api/types" @@ -27,17 +28,17 @@ type serverResponse struct { } // head sends an http request to the docker API using the method HEAD. -func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) } // get sends an http request to the docker API using the method GET with a specific Go context. -func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) } // post sends an http request to the docker API using the method POST with a specific Go context. 
-func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (serverResponse, error) { body, headers, err := encodeBody(obj, headers) if err != nil { return serverResponse{}, err @@ -45,34 +46,44 @@ func (cli *Client) post(ctx context.Context, path string, query url.Values, obj return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) } -func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) } -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (serverResponse, error) { body, headers, err := encodeBody(obj, headers) if err != nil { return serverResponse{}, err } - return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) + return cli.putRaw(ctx, path, query, body, headers) } // putRaw sends an http request to the docker API using the method PUT. -func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { + // PUT requests are expected to always have a body (apparently) + // so explicitly pass an empty body to sendRequest to signal that + // it should set the Content-Type header if not already present. + if body == nil { + body = http.NoBody + } return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) } // delete sends an http request to the docker API using the method DELETE. -func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) } -type headers map[string][]string - -func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { +func encodeBody(obj interface{}, headers http.Header) (io.Reader, http.Header, error) { if obj == nil { return nil, headers, nil } + // encoding/json encodes a nil pointer as the JSON document `null`, + // irrespective of whether the type implements json.Marshaler or encoding.TextMarshaler. + // That is almost certainly not what the caller intended as the request body. 
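A standalone illustration of the encoding behaviour the comment above refers to; the option struct is hypothetical:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type pruneOptions struct {
        Force bool `json:"force"`
    }

    func main() {
        var opts *pruneOptions // typed nil pointer
        b, err := json.Marshal(opts)
        fmt.Println(string(b), err) // prints: null <nil>
    }
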
+ if reflect.TypeOf(obj).Kind() == reflect.Ptr && reflect.ValueOf(obj).IsNil() { + return nil, headers, nil + } body, err := encodeData(obj) if err != nil { @@ -85,13 +96,8 @@ func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { return body, headers, nil } -func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { - expectedPayload := (method == http.MethodPost || method == http.MethodPut) - if expectedPayload && body == nil { - body = bytes.NewReader([]byte{}) - } - - req, err := http.NewRequest(method, path, body) +func (cli *Client) buildRequest(ctx context.Context, method, path string, body io.Reader, headers http.Header) (*http.Request, error) { + req, err := http.NewRequestWithContext(ctx, method, path, body) if err != nil { return nil, err } @@ -104,19 +110,19 @@ func (cli *Client) buildRequest(method, path string, body io.Reader, headers hea req.Host = DummyHost } - if expectedPayload && req.Header.Get("Content-Type") == "" { + if body != nil && req.Header.Get("Content-Type") == "" { req.Header.Set("Content-Type", "text/plain") } return req, nil } -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { - req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers) +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { + req, err := cli.buildRequest(ctx, method, cli.getAPIPath(ctx, path, query), body, headers) if err != nil { return serverResponse{}, err } - resp, err := cli.doRequest(ctx, req) + resp, err := cli.doRequest(req) switch { case errors.Is(err, context.Canceled): return serverResponse{}, errdefs.Cancelled(err) @@ -128,10 +134,9 @@ func (cli *Client) sendRequest(ctx context.Context, method, path string, query u return resp, errdefs.FromStatusCode(err, resp.statusCode) } -func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { +func (cli *Client) doRequest(req *http.Request) (serverResponse, error) { serverResp := serverResponse{statusCode: -1, reqURL: req.URL} - req = req.WithContext(ctx) resp, err := cli.client.Do(req) if err != nil { if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { @@ -148,19 +153,19 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp return serverResp, err } - if nErr, ok := err.(*url.Error); ok { - if nErr, ok := nErr.Err.(*net.OpError); ok { + if uErr, ok := err.(*url.Error); ok { + if nErr, ok := uErr.Err.(*net.OpError); ok { if os.IsPermission(nErr.Err) { return serverResp, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host) } } } - if err, ok := err.(net.Error); ok { - if err.Timeout() { + if nErr, ok := err.(net.Error); ok { + if nErr.Timeout() { return serverResp, ErrorConnectionFailed(cli.host) } - if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + if strings.Contains(nErr.Error(), "connection refused") || strings.Contains(nErr.Error(), "dial unix") { return serverResp, ErrorConnectionFailed(cli.host) } } @@ -221,26 +226,20 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error { return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", 
http.StatusText(serverResp.statusCode), serverResp.reqURL) } - var ct string - if serverResp.header != nil { - ct = serverResp.header.Get("Content-Type") - } - - var errorMessage string - if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { + var daemonErr error + if serverResp.header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) { var errorResponse types.ErrorResponse if err := json.Unmarshal(body, &errorResponse); err != nil { return errors.Wrap(err, "Error reading JSON") } - errorMessage = strings.TrimSpace(errorResponse.Message) + daemonErr = errors.New(strings.TrimSpace(errorResponse.Message)) } else { - errorMessage = strings.TrimSpace(string(body)) + daemonErr = errors.New(strings.TrimSpace(string(body))) } - - return errors.Wrap(errors.New(errorMessage), "Error response from daemon") + return errors.Wrap(daemonErr, "Error response from daemon") } -func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { +func (cli *Client) addHeaders(req *http.Request, headers http.Header) *http.Request { // Add CLI Config's HTTP Headers BEFORE we set the Docker headers // then the user can't change OUR headers for k, v := range cli.customHTTPHeaders { @@ -253,6 +252,14 @@ func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request for k, v := range headers { req.Header[http.CanonicalHeaderKey(k)] = v } + + if cli.userAgent != nil { + if *cli.userAgent == "" { + req.Header.Del("User-Agent") + } else { + req.Header.Set("User-Agent", *cli.userAgent) + } + } return req } diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go index c65d38a191..7b7f1ba740 100644 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -11,7 +11,7 @@ import ( // SecretCreate creates a new secret. func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { var response types.SecretCreateResponse - if err := cli.NewVersionError("1.25", "secret create"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret create"); err != nil { return response, err } resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go index 5906874b15..a9cb59889b 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -11,7 +11,7 @@ import ( // SecretInspectWithRaw returns the secret information with raw data func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { - if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret inspect"); err != nil { return swarm.Secret{}, nil, err } if id == "" { diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go index a0289c9f44..4d21639ef6 100644 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -12,7 +12,7 @@ import ( // SecretList returns the list of secrets. 
func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { - if err := cli.NewVersionError("1.25", "secret list"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret list"); err != nil { return nil, err } query := url.Values{} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go index f47f68b6e0..079ed67394 100644 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -4,7 +4,7 @@ import "context" // SecretRemove removes a secret. func (cli *Client) SecretRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.25", "secret remove"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret remove"); err != nil { return err } resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go index 2e939e8ced..9dfe67198b 100644 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -9,7 +9,7 @@ import ( // SecretUpdate attempts to update a secret. func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { - if err := cli.NewVersionError("1.25", "secret update"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret update"); err != nil { return err } query := url.Values{} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index b6065b8eef..2ebb5ee3a5 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -4,26 +4,28 @@ import ( "context" "encoding/json" "fmt" + "net/http" "strings" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) // ServiceCreate creates a new service. -func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { - var response types.ServiceCreateResponse - headers := map[string][]string{ - "version": {cli.version}, - } +func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) { + var response swarm.ServiceCreateResponse - if options.EncodedRegistryAuth != "" { - headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} - } + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. 
+ cli.checkVersion(ctx) // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { @@ -53,6 +55,16 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, } } + headers := http.Header{} + if versions.LessThan(cli.version, "1.30") { + // the custom "version" header was used by engine API before 20.10 + // (API 1.30) to switch between client- and server-side lookup of + // image digests. + headers["version"] = []string{cli.version} + } + if options.EncodedRegistryAuth != "" { + headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} + } resp, err := cli.post(ctx, "/services/create", nil, service, headers) defer ensureReaderClosed(resp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go index 906fd4059e..e9e30a2ab4 100644 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -6,14 +6,14 @@ import ( "net/url" "time" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" timetypes "github.com/docker/docker/api/types/time" "github.com/pkg/errors" ) // ServiceLogs returns the logs generated by a service in an io.ReadCloser. // It's up to the caller to close the stream. -func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { +func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go index ff8cded8be..e05eebf566 100644 --- a/vendor/github.com/docker/docker/client/service_update.go +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -3,30 +3,31 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "net/http" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" ) // ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. // It should be the value as set *before* the update. You can find this value in the Meta field // of swarm.Service, which can be found using ServiceInspectWithRaw. -func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { +func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. 
+ cli.checkVersion(ctx) + var ( query = url.Values{} - response = types.ServiceUpdateResponse{} + response = swarm.ServiceUpdateResponse{} ) - headers := map[string][]string{ - "version": {cli.version}, - } - - if options.EncodedRegistryAuth != "" { - headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} - } - if options.RegistryAuthFrom != "" { query.Set("registryAuthFrom", options.RegistryAuthFrom) } @@ -60,6 +61,16 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version } } + headers := http.Header{} + if versions.LessThan(cli.version, "1.30") { + // the custom "version" header was used by engine API before 20.10 + // (API 1.30) to switch between client- and server-side lookup of + // image digests. + headers["version"] = []string{cli.version} + } + if options.EncodedRegistryAuth != "" { + headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} + } resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) defer ensureReaderClosed(resp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go index 6222fab577..b8c20e71da 100644 --- a/vendor/github.com/docker/docker/client/task_logs.go +++ b/vendor/github.com/docker/docker/client/task_logs.go @@ -6,13 +6,13 @@ import ( "net/url" "time" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" timetypes "github.com/docker/docker/api/types/time" ) // TaskLogs returns the logs generated by a task in an io.ReadCloser. // It's up to the caller to close the stream. -func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { +func (cli *Client) TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go deleted file mode 100644 index 5541344366..0000000000 --- a/vendor/github.com/docker/docker/client/transport.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "crypto/tls" - "net/http" -) - -// resolveTLSConfig attempts to resolve the TLS configuration from the -// RoundTripper. 
-func resolveTLSConfig(transport http.RoundTripper) *tls.Config {
-	switch tr := transport.(type) {
-	case *http.Transport:
-		return tr.TLSClientConfig
-	default:
-		return nil
-	}
-}
diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go
index 6e324708f2..9333f6ee78 100644
--- a/vendor/github.com/docker/docker/client/volume_prune.go
+++ b/vendor/github.com/docker/docker/client/volume_prune.go
@@ -13,7 +13,7 @@ import (
 func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) {
 	var report types.VolumesPruneReport
 
-	if err := cli.NewVersionError("1.25", "volume prune"); err != nil {
+	if err := cli.NewVersionError(ctx, "1.25", "volume prune"); err != nil {
 		return report, err
 	}
 
diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go
index 1f26438360..31e08cb975 100644
--- a/vendor/github.com/docker/docker/client/volume_remove.go
+++ b/vendor/github.com/docker/docker/client/volume_remove.go
@@ -10,8 +10,14 @@ import (
 // VolumeRemove removes a volume from the docker host.
 func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error {
 	query := url.Values{}
-	if versions.GreaterThanOrEqualTo(cli.version, "1.25") {
-		if force {
+	if force {
+		// Make sure we negotiated (if the client is configured to do so),
+		// as code below contains API-version specific handling of options.
+		//
+		// Normally, version-negotiation (if enabled) would not happen until
+		// the API request is made.
+		cli.checkVersion(ctx)
+		if versions.GreaterThanOrEqualTo(cli.version, "1.25") {
 			query.Set("force", "1")
 		}
 	}
diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go
index 33bd31e531..151863f07a 100644
--- a/vendor/github.com/docker/docker/client/volume_update.go
+++ b/vendor/github.com/docker/docker/client/volume_update.go
@@ -11,7 +11,7 @@ import (
 // VolumeUpdate updates a volume. This only works for Cluster Volumes, and
 // only some fields can be updated.
 func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error {
-	if err := cli.NewVersionError("1.42", "volume update"); err != nil {
+	if err := cli.NewVersionError(ctx, "1.42", "volume update"); err != nil {
 		return err
 	}
 
diff --git a/vendor/github.com/docker/docker/container/attach_context.go b/vendor/github.com/docker/docker/container/attach_context.go
new file mode 100644
index 0000000000..5a7d0748f0
--- /dev/null
+++ b/vendor/github.com/docker/docker/container/attach_context.go
@@ -0,0 +1,35 @@
+package container
+
+import (
+	"context"
+	"sync"
+)
+
+// attachContext is the context used for attach calls.
+type attachContext struct {
+	mu         sync.Mutex
+	ctx        context.Context
+	cancelFunc context.CancelFunc
+}
+
+// init returns the context for attach calls. It creates a new context
+// if no context is created yet.
+func (ac *attachContext) init() context.Context {
+	ac.mu.Lock()
+	defer ac.mu.Unlock()
+	if ac.ctx == nil {
+		ac.ctx, ac.cancelFunc = context.WithCancel(context.Background())
+	}
+	return ac.ctx
+}
+
+// cancelFunc cancels the attachContext. All attach calls should detach
+// after this call.
+func (ac *attachContext) cancel() { + ac.mu.Lock() + if ac.ctx != nil { + ac.cancelFunc() + ac.ctx = nil + } + ac.mu.Unlock() +} diff --git a/vendor/github.com/docker/docker/container/container.go b/vendor/github.com/docker/docker/container/container.go index e04696bd65..e73f05654f 100644 --- a/vendor/github.com/docker/docker/container/container.go +++ b/vendor/github.com/docker/docker/container/container.go @@ -10,12 +10,13 @@ import ( "path/filepath" "runtime" "strings" - "sync" "syscall" "time" "github.com/containerd/containerd/cio" + "github.com/containerd/log" containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" mounttypes "github.com/docker/docker/api/types/mount" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/container/stream" @@ -41,7 +42,6 @@ import ( "github.com/moby/sys/symlink" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const ( @@ -175,7 +175,7 @@ func (container *Container) toDisk() (*Container, error) { } // Save container settings - f, err := ioutils.NewAtomicFileWriter(pth, 0600) + f, err := ioutils.NewAtomicFileWriter(pth, 0o600) if err != nil { return nil, err } @@ -250,7 +250,7 @@ func (container *Container) WriteHostConfig() (*containertypes.HostConfig, error return nil, err } - f, err := ioutils.NewAtomicFileWriter(pth, 0600) + f, err := ioutils.NewAtomicFileWriter(pth, 0o600) if err != nil { return nil, err } @@ -267,6 +267,32 @@ func (container *Container) WriteHostConfig() (*containertypes.HostConfig, error return &deepCopy, nil } +// CommitInMemory makes the Container's current state visible to queries, +// but does not persist state. +// +// Callers must hold a Container lock. +func (container *Container) CommitInMemory(store *ViewDB) error { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(container); err != nil { + return err + } + + var deepCopy Container + if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil { + return err + } + + buf.Reset() + if err := json.NewEncoder(&buf).Encode(container.HostConfig); err != nil { + return err + } + if err := json.NewDecoder(&buf).Decode(&deepCopy.HostConfig); err != nil { + return err + } + + return store.Save(&deepCopy) +} + // SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir func (container *Container) SetupWorkingDirectory(rootIdentity idtools.Identity) error { if container.Config.WorkingDir == "" { @@ -279,7 +305,7 @@ func (container *Container) SetupWorkingDirectory(rootIdentity idtools.Identity) return err } - if err := idtools.MkdirAllAndChownNew(pth, 0755, rootIdentity); err != nil { + if err := idtools.MkdirAllAndChownNew(pth, 0o755, rootIdentity); err != nil { pthInfo, err2 := os.Stat(pth) if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { return errors.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) @@ -310,14 +336,14 @@ func (container *Container) GetResourcePath(path string) (string, error) { return "", errors.New("GetResourcePath: BaseFS of container " + container.ID + " is unexpectedly empty") } // IMPORTANT - These are paths on the OS where the daemon is running, hence - // any filepath operations must be done in an OS agnostic way. - r, e := containerfs.ResolveScopedPath(container.BaseFS, containerfs.CleanScopedPath(path)) + // any filepath operations must be done in an OS-agnostic way. 
+ r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, containerfs.CleanScopedPath(path)), container.BaseFS) // Log this here on the daemon side as there's otherwise no indication apart // from the error being propagated all the way back to the client. This makes // debugging significantly easier and clearly indicates the error comes from the daemon. if e != nil { - logrus.Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS, path, e) + log.G(context.TODO()).Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS, path, e) } return r, e } @@ -402,7 +428,7 @@ func (container *Container) StartLogger() (logger.Logger, error) { if err != nil { return nil, err } - if err := os.MkdirAll(logDir, 0700); err != nil { + if err := os.MkdirAll(logDir, 0o700); err != nil { return nil, errdefs.System(errors.Wrap(err, "error creating local logs dir")) } info.LogPath = filepath.Join(logDir, "container.log") @@ -432,7 +458,7 @@ func (container *Container) StartLogger() (logger.Logger, error) { } if !container.LocalLogCacheMeta.HaveNotifyEnabled { - logrus.WithField("container", container.ID).WithField("driver", container.HostConfig.LogConfig.Type).Info("Configured log driver does not support reads, enabling local file cache for container logs") + log.G(context.TODO()).WithField("container", container.ID).WithField("driver", container.HostConfig.LogConfig.Type).Info("Configured log driver does not support reads, enabling local file cache for container logs") container.LocalLogCacheMeta.HaveNotifyEnabled = true } info.LogPath = logPath @@ -488,26 +514,24 @@ func (container *Container) AddMountPointWithVolume(destination string, vol volu } // UnmountVolumes unmounts all volumes -func (container *Container) UnmountVolumes(volumeEventLog func(name, action string, attributes map[string]string)) error { - var errors []string +func (container *Container) UnmountVolumes(volumeEventLog func(name string, action events.Action, attributes map[string]string)) error { + var errs []string for _, volumeMount := range container.MountPoints { if volumeMount.Volume == nil { continue } if err := volumeMount.Cleanup(); err != nil { - errors = append(errors, err.Error()) + errs = append(errs, err.Error()) continue } - - attributes := map[string]string{ + volumeEventLog(volumeMount.Volume.Name(), events.ActionUnmount, map[string]string{ "driver": volumeMount.Volume.DriverName(), "container": container.ID, - } - volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) + }) } - if len(errors) > 0 { - return fmt.Errorf("error while unmounting volumes for container %s: %s", container.ID, strings.Join(errors, "; ")) + if len(errs) > 0 { + return fmt.Errorf("error while unmounting volumes for container %s: %s", container.ID, strings.Join(errs, "; ")) } return nil } @@ -595,32 +619,15 @@ func (container *Container) ResetRestartManager(resetCount bool) { container.restartManager = nil } -type attachContext struct { - ctx context.Context - cancel context.CancelFunc - mu sync.Mutex -} - -// InitAttachContext initializes or returns existing context for attach calls to -// track container liveness. 
-func (container *Container) InitAttachContext() context.Context { - container.attachContext.mu.Lock() - defer container.attachContext.mu.Unlock() - if container.attachContext.ctx == nil { - container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background()) - } - return container.attachContext.ctx +// AttachContext returns the context for attach calls to track container liveness. +func (container *Container) AttachContext() context.Context { + return container.attachContext.init() } // CancelAttachContext cancels attach context. All attach calls should detach // after this call. func (container *Container) CancelAttachContext() { - container.attachContext.mu.Lock() - if container.attachContext.ctx != nil { - container.attachContext.cancel() - container.attachContext.ctx = nil - } - container.attachContext.mu.Unlock() + container.attachContext.cancel() } func (container *Container) startLogging() error { @@ -673,7 +680,7 @@ func (container *Container) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) { if container.StreamConfig.Stdin() == nil && !container.Config.Tty { if iop.Stdin != nil { if err := iop.Stdin.Close(); err != nil { - logrus.Warnf("error closing stdin: %+v", err) + log.G(context.TODO()).Warnf("error closing stdin: %+v", err) } } } diff --git a/vendor/github.com/docker/docker/container/container_unix.go b/vendor/github.com/docker/docker/container/container_unix.go index 3dced869c4..80cf5e58dd 100644 --- a/vendor/github.com/docker/docker/container/container_unix.go +++ b/vendor/github.com/docker/docker/container/container_unix.go @@ -1,16 +1,18 @@ //go:build !windows -// +build !windows package container // import "github.com/docker/docker/container" import ( + "context" "os" "path/filepath" "syscall" "github.com/containerd/continuity/fs" + "github.com/containerd/log" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" mounttypes "github.com/docker/docker/api/types/mount" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/pkg/stringid" @@ -19,7 +21,6 @@ import ( "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const ( @@ -60,17 +61,19 @@ func (container *Container) BuildHostnameFile() error { return err } container.HostnamePath = hostnamePath - return os.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) + return os.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0o644) } // NetworkMounts returns the list of network mounts. 
func (container *Container) NetworkMounts() []Mount { + ctx := context.TODO() + var mounts []Mount shared := container.HostConfig.NetworkMode.IsContainer() parser := volumemounts.NewParser() if container.ResolvConfPath != "" { if _, err := os.Stat(container.ResolvConfPath); err != nil { - logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) + log.G(ctx).Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) } else { writable := !container.HostConfig.ReadonlyRootfs if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { @@ -88,7 +91,7 @@ func (container *Container) NetworkMounts() []Mount { } if container.HostnamePath != "" { if _, err := os.Stat(container.HostnamePath); err != nil { - logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) + log.G(ctx).Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) } else { writable := !container.HostConfig.ReadonlyRootfs if m, exists := container.MountPoints["/etc/hostname"]; exists { @@ -106,7 +109,7 @@ func (container *Container) NetworkMounts() []Mount { } if container.HostsPath != "" { if _, err := os.Stat(container.HostsPath); err != nil { - logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) + log.G(ctx).Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) } else { writable := !container.HostConfig.ReadonlyRootfs if m, exists := container.MountPoints["/etc/hosts"]; exists { @@ -147,7 +150,7 @@ func (container *Container) CopyImagePathContent(v volume.Volume, destination st defer func() { if err := v.Unmount(id); err != nil { - logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err) + log.G(context.TODO()).Warnf("error while unmounting volume %s: %v", v.Name(), err) } }() if err := label.Relabel(path, container.MountLabel, true); err != nil && !errors.Is(err, syscall.ENOTSUP) { @@ -363,14 +366,16 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi // DetachAndUnmount uses a detached mount on all mount destinations, then // unmounts each volume normally. 
// This is used from daemon/archive for `docker cp` -func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { +func (container *Container) DetachAndUnmount(volumeEventLog func(name string, action events.Action, attributes map[string]string)) error { + ctx := context.TODO() + networkMounts := container.NetworkMounts() mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts)) for _, mntPoint := range container.MountPoints { dest, err := container.GetResourcePath(mntPoint.Destination) if err != nil { - logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err) + log.G(ctx).Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err) continue } mountPaths = append(mountPaths, dest) @@ -379,7 +384,7 @@ func (container *Container) DetachAndUnmount(volumeEventLog func(name, action st for _, m := range networkMounts { dest, err := container.GetResourcePath(m.Destination) if err != nil { - logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err) + log.G(ctx).Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err) continue } mountPaths = append(mountPaths, dest) @@ -387,7 +392,7 @@ func (container *Container) DetachAndUnmount(volumeEventLog func(name, action st for _, mountPath := range mountPaths { if err := mount.Unmount(mountPath); err != nil { - logrus.WithError(err).WithField("container", container.ID). + log.G(ctx).WithError(err).WithField("container", container.ID). Warn("Unable to unmount") } } diff --git a/vendor/github.com/docker/docker/container/container_windows.go b/vendor/github.com/docker/docker/container/container_windows.go index 8229480df7..bceedcb637 100644 --- a/vendor/github.com/docker/docker/container/container_windows.go +++ b/vendor/github.com/docker/docker/container/container_windows.go @@ -7,6 +7,7 @@ import ( "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/pkg/system" ) @@ -126,7 +127,7 @@ func (container *Container) ConfigMounts() []Mount { // DetachAndUnmount unmounts all volumes. // On Windows it only delegates to `UnmountVolumes` since there is nothing to // force unmount. 
-func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { +func (container *Container) DetachAndUnmount(volumeEventLog func(name string, action events.Action, attributes map[string]string)) error { return container.UnmountVolumes(volumeEventLog) } diff --git a/vendor/github.com/docker/docker/container/exec.go b/vendor/github.com/docker/docker/container/exec.go index 18e86c6a4f..328b596b04 100644 --- a/vendor/github.com/docker/docker/container/exec.go +++ b/vendor/github.com/docker/docker/container/exec.go @@ -1,14 +1,15 @@ package container // import "github.com/docker/docker/container" import ( + "context" "runtime" "sync" "github.com/containerd/containerd/cio" + "github.com/containerd/log" "github.com/docker/docker/container/stream" "github.com/docker/docker/libcontainerd/types" "github.com/docker/docker/pkg/stringid" - "github.com/sirupsen/logrus" ) // ExecConfig holds the configurations for execs. The Daemon keeps @@ -55,7 +56,7 @@ func (c *ExecConfig) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) { if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" { if iop.Stdin != nil { if err := iop.Stdin.Close(); err != nil { - logrus.Errorf("error closing exec stdin: %+v", err) + log.G(context.TODO()).Errorf("error closing exec stdin: %+v", err) } } } diff --git a/vendor/github.com/docker/docker/container/health.go b/vendor/github.com/docker/docker/container/health.go index 3e93142b98..ea354315cc 100644 --- a/vendor/github.com/docker/docker/container/health.go +++ b/vendor/github.com/docker/docker/container/health.go @@ -1,10 +1,11 @@ package container // import "github.com/docker/docker/container" import ( + "context" "sync" + "github.com/containerd/log" "github.com/docker/docker/api/types" - "github.com/sirupsen/logrus" ) // Health holds the current container health-check state @@ -59,7 +60,7 @@ func (s *Health) OpenMonitorChannel() chan struct{} { defer s.mu.Unlock() if s.stop == nil { - logrus.Debug("OpenMonitorChannel") + log.G(context.TODO()).Debug("OpenMonitorChannel") s.stop = make(chan struct{}) return s.stop } @@ -72,11 +73,11 @@ func (s *Health) CloseMonitorChannel() { defer s.mu.Unlock() if s.stop != nil { - logrus.Debug("CloseMonitorChannel: waiting for probe to stop") + log.G(context.TODO()).Debug("CloseMonitorChannel: waiting for probe to stop") close(s.stop) s.stop = nil // unhealthy when the monitor has stopped for compatibility reasons s.Health.Status = types.Unhealthy - logrus.Debug("CloseMonitorChannel done") + log.G(context.TODO()).Debug("CloseMonitorChannel done") } } diff --git a/vendor/github.com/docker/docker/container/monitor.go b/vendor/github.com/docker/docker/container/monitor.go index ff4b3439e5..60a6228ee4 100644 --- a/vendor/github.com/docker/docker/container/monitor.go +++ b/vendor/github.com/docker/docker/container/monitor.go @@ -1,9 +1,10 @@ package container // import "github.com/docker/docker/container" import ( + "context" "time" - "github.com/sirupsen/logrus" + "github.com/containerd/log" ) const ( @@ -18,7 +19,7 @@ func (container *Container) Reset(lock bool) { } if err := container.CloseStreams(); err != nil { - logrus.Errorf("%s: %s", container.ID, err) + log.G(context.TODO()).Errorf("%s: %s", container.ID, err) } // Re-create a brand new stdin pipe once the container exited @@ -38,7 +39,7 @@ func (container *Container) Reset(lock bool) { defer timer.Stop() select { case <-timer.C: - logrus.Warn("Logger didn't exit in time: logs may be truncated") + 
log.G(context.TODO()).Warn("Logger didn't exit in time: logs may be truncated") case <-exit: } } diff --git a/vendor/github.com/docker/docker/container/mounts_unix.go b/vendor/github.com/docker/docker/container/mounts_unix.go index 168286889a..14c8d36c12 100644 --- a/vendor/github.com/docker/docker/container/mounts_unix.go +++ b/vendor/github.com/docker/docker/container/mounts_unix.go @@ -1,14 +1,15 @@ //go:build !windows -// +build !windows package container // import "github.com/docker/docker/container" // Mount contains information for a mount operation. type Mount struct { - Source string `json:"source"` - Destination string `json:"destination"` - Writable bool `json:"writable"` - Data string `json:"data"` - Propagation string `json:"mountpropagation"` - NonRecursive bool `json:"nonrecursive"` + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` + Data string `json:"data"` + Propagation string `json:"mountpropagation"` + NonRecursive bool `json:"nonrecursive"` + ReadOnlyNonRecursive bool `json:"readonlynonrecursive"` + ReadOnlyForceRecursive bool `json:"readonlyforcerecursive"` } diff --git a/vendor/github.com/docker/docker/container/stream/attach.go b/vendor/github.com/docker/docker/container/stream/attach.go index 0269a226b1..6079d20d1e 100644 --- a/vendor/github.com/docker/docker/container/stream/attach.go +++ b/vendor/github.com/docker/docker/container/stream/attach.go @@ -4,10 +4,10 @@ import ( "context" "io" + "github.com/containerd/log" "github.com/docker/docker/pkg/pools" "github.com/moby/term" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) @@ -63,8 +63,8 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro // Connect stdin of container to the attach stdin stream. 
if cfg.Stdin != nil { group.Go(func() error { - logrus.Debug("attach: stdin: begin") - defer logrus.Debug("attach: stdin: end") + log.G(ctx).Debug("attach: stdin: begin") + defer log.G(ctx).Debug("attach: stdin: end") defer func() { if cfg.CloseStdin && !cfg.TTY { @@ -90,7 +90,7 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro err = nil } if err != nil { - logrus.WithError(err).Debug("error on attach stdin") + log.G(ctx).WithError(err).Debug("error on attach stdin") return errors.Wrap(err, "error on attach stdin") } return nil @@ -98,8 +98,8 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro } attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) error { - logrus.Debugf("attach: %s: begin", name) - defer logrus.Debugf("attach: %s: end", name) + log.G(ctx).Debugf("attach: %s: begin", name) + defer log.G(ctx).Debugf("attach: %s: end", name) defer func() { // Make sure stdin gets closed if cfg.Stdin != nil { @@ -113,7 +113,7 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro err = nil } if err != nil { - logrus.WithError(err).Debugf("attach: %s", name) + log.G(ctx).WithError(err).Debugf("attach: %s", name) return errors.Wrapf(err, "error attaching %s stream", name) } return nil @@ -132,7 +132,7 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro errs := make(chan error, 1) go func() { - defer logrus.Debug("attach done") + defer log.G(ctx).Debug("attach done") groupErr := make(chan error, 1) go func() { groupErr <- group.Wait() diff --git a/vendor/github.com/docker/docker/container/stream/streams.go b/vendor/github.com/docker/docker/container/stream/streams.go index 83e6ded611..78ec048396 100644 --- a/vendor/github.com/docker/docker/container/stream/streams.go +++ b/vendor/github.com/docker/docker/container/stream/streams.go @@ -8,10 +8,10 @@ import ( "sync" "github.com/containerd/containerd/cio" + "github.com/containerd/log" "github.com/docker/docker/pkg/broadcaster" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" - "github.com/sirupsen/logrus" ) // Config holds information about I/O streams managed together. 
@@ -116,12 +116,14 @@ func (c *Config) CloseStreams() error { // CopyToPipe connects streamconfig with a libcontainerd.IOPipe func (c *Config) CopyToPipe(iop *cio.DirectIO) { + ctx := context.TODO() + c.dio = iop copyFunc := func(w io.Writer, r io.ReadCloser) { c.wg.Add(1) go func() { if _, err := pools.Copy(w, r); err != nil { - logrus.Errorf("stream copy error: %v", err) + log.G(ctx).Errorf("stream copy error: %v", err) } r.Close() c.wg.Done() @@ -140,7 +142,7 @@ func (c *Config) CopyToPipe(iop *cio.DirectIO) { go func() { pools.Copy(iop.Stdin, stdin) if err := iop.Stdin.Close(); err != nil { - logrus.Warnf("failed to close stdin: %v", err) + log.G(ctx).Warnf("failed to close stdin: %v", err) } }() } diff --git a/vendor/github.com/docker/docker/container/view.go b/vendor/github.com/docker/docker/container/view.go index 9d300cb4ce..5487beacaf 100644 --- a/vendor/github.com/docker/docker/container/view.go +++ b/vendor/github.com/docker/docker/container/view.go @@ -2,17 +2,18 @@ package container // import "github.com/docker/docker/container" import ( "bytes" + "context" "errors" "fmt" "strings" "time" + "github.com/containerd/log" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/network" "github.com/docker/docker/errdefs" "github.com/docker/go-connections/nat" memdb "github.com/hashicorp/go-memdb" - "github.com/sirupsen/logrus" ) const ( @@ -387,7 +388,7 @@ func (v *View) transform(container *Container) *Snapshot { for port, bindings := range container.NetworkSettings.Ports { p, err := nat.ParsePort(port.Port()) if err != nil { - logrus.WithError(err).Warn("invalid port map") + log.G(context.TODO()).WithError(err).Warn("invalid port map") continue } if len(bindings) == 0 { @@ -400,7 +401,7 @@ func (v *View) transform(container *Container) *Snapshot { for _, binding := range bindings { h, err := nat.ParsePort(binding.HostPort) if err != nil { - logrus.WithError(err).Warn("invalid host port map") + log.G(context.TODO()).WithError(err).Warn("invalid host port map") continue } snapshot.Ports = append(snapshot.Ports, types.Port{ diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go index 2853fa9682..6ddde44b89 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go @@ -1,17 +1,18 @@ package graphdriver // import "github.com/docker/docker/daemon/graphdriver" import ( + "context" "fmt" "io" "os" "path/filepath" "strings" + "github.com/containerd/log" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/storage" ) @@ -23,10 +24,8 @@ const ( FsMagicUnsupported = FsMagic(0x00000000) ) -var ( - // All registered drivers - drivers map[string]InitFunc -) +// All registered drivers +var drivers map[string]InitFunc // CreateOpts contains optional arguments for Create() and CreateReadWrite() // methods. 
@@ -168,7 +167,7 @@ func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Drive if err == nil { return pluginDriver, nil } - logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph") + log.G(context.TODO()).WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph") return nil, ErrNotSupported } @@ -177,7 +176,7 @@ func getBuiltinDriver(name, home string, options []string, idMap idtools.Identit if initFunc, exists := drivers[name]; exists { return initFunc(filepath.Join(home, name), options, idMap) } - logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + log.G(context.TODO()).Errorf("Failed to built-in GetDriver graph %s %s", name, home) return nil, ErrNotSupported } @@ -191,21 +190,19 @@ type Options struct { // New creates the driver and initializes it at the specified root. func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + ctx := context.TODO() if name != "" { - logrus.Infof("[graphdriver] trying configured driver: %s", name) + log.G(ctx).Infof("[graphdriver] trying configured driver: %s", name) if err := checkRemoved(name); err != nil { return nil, err } - if isDeprecated(name) { - logrus.Warnf("[graphdriver] WARNING: the %s storage-driver is deprecated and will be removed in a future release; visit https://docs.docker.com/go/storage-driver/ for more information", name) - } return GetDriver(name, pg, config) } // Guess for prior driver driversMap := scanPriorDrivers(config.Root) priorityList := strings.Split(priority, ",") - logrus.Debugf("[graphdriver] priority list: %v", priorityList) + log.G(ctx).Debugf("[graphdriver] priority list: %v", priorityList) for _, name := range priorityList { if _, prior := driversMap[name]; prior { // of the state found from prior drivers, check in order of our priority @@ -216,12 +213,7 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err // state, and now it is no longer supported/prereq/compatible, so // something changed and needs attention. Otherwise the daemon's // images would just "disappear". - logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) - return nil, err - } - if isDeprecated(name) { - err = errors.Errorf("prior storage driver %s is deprecated and will be removed in a future release; update the the daemon configuration and explicitly choose this storage driver to continue using it; visit https://docs.docker.com/go/storage-driver/ for more information", name) - logrus.Errorf("[graphdriver] %v", err) + log.G(ctx).Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) return nil, err } @@ -234,11 +226,11 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err } err = errors.Errorf("%s contains several valid graphdrivers: %s; cleanup or explicitly choose storage driver (-s )", config.Root, strings.Join(driversSlice, ", ")) - logrus.Errorf("[graphdriver] %v", err) + log.G(ctx).Errorf("[graphdriver] %v", err) return nil, err } - logrus.Infof("[graphdriver] using prior storage driver: %s", name) + log.G(ctx).Infof("[graphdriver] using prior storage driver: %s", name) return driver, nil } } @@ -246,11 +238,6 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err // If no prior state was found, continue with automatic selection, and pick // the first supported, non-deprecated, storage driver (in order of priorityList). 
for _, name := range priorityList { - if isDeprecated(name) { - // Deprecated storage-drivers are skipped in automatic selection, but - // can be selected through configuration. - continue - } driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.IDMap) if err != nil { if IsDriverNotSupported(err) { @@ -263,11 +250,6 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err // Check all registered drivers if no priority driver is found for name, initFunc := range drivers { - if isDeprecated(name) { - // Deprecated storage-drivers are skipped in automatic selection, but - // can be selected through configuration. - continue - } driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.IDMap) if err != nil { if IsDriverNotSupported(err) { @@ -314,20 +296,10 @@ func isEmptyDir(name string) bool { return false } -// isDeprecated checks if a storage-driver is marked "deprecated" -func isDeprecated(name string) bool { - switch name { - // NOTE: when deprecating a driver, update daemon.fillDriverInfo() accordingly - case "devicemapper": - return true - } - return false -} - // checkRemoved checks if a storage-driver has been deprecated (and removed) func checkRemoved(name string) error { switch name { - case "aufs", "overlay": + case "aufs", "devicemapper", "overlay": return NotSupportedError(fmt.Sprintf("[graphdriver] ERROR: the %s storage-driver has been deprecated and removed; visit https://docs.docker.com/go/storage-driver/ for more information", name)) } return nil diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go index cd83c4e21a..5591e3f187 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go @@ -6,10 +6,8 @@ import ( "golang.org/x/sys/unix" ) -var ( - // List of drivers that should be used in an order - priority = "zfs" -) +// List of drivers that should be used in an order +var priority = "zfs" // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go index 4341b763aa..5fc3158113 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go @@ -50,7 +50,7 @@ const ( var ( // List of drivers that should be used in an order - priority = "overlay2,fuse-overlayfs,btrfs,zfs,devicemapper,vfs" + priority = "overlay2,fuse-overlayfs,btrfs,zfs,vfs" // FsNames maps filesystem id to name of the filesystem. 
FsNames = map[FsMagic]string{ @@ -109,8 +109,7 @@ func NewDefaultChecker() Checker { return &defaultChecker{} } -type defaultChecker struct { -} +type defaultChecker struct{} func (c *defaultChecker) IsMounted(path string) bool { m, _ := mountinfo.Mounted(path) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go index 60aea63b9c..3100bcb57b 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go @@ -1,12 +1,9 @@ //go:build !linux && !windows && !freebsd -// +build !linux,!windows,!freebsd package graphdriver // import "github.com/docker/docker/daemon/graphdriver" -var ( - // List of drivers that should be used in an order - priority = "unsupported" -) +// List of drivers that should be used in an order +var priority = "unsupported" // GetFSMagic returns the filesystem id given the path. func GetFSMagic(rootpath string) (FsMagic, error) { diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go index 856b575e75..e6470290df 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go @@ -1,9 +1,7 @@ package graphdriver // import "github.com/docker/docker/daemon/graphdriver" -var ( - // List of drivers that should be used in order - priority = "windowsfilter" -) +// List of drivers that should be used in order +var priority = "windowsfilter" // GetFSMagic returns the filesystem id given the path. func GetFSMagic(rootpath string) (FsMagic, error) { diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go index 6a7b1312b7..79560686ca 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go @@ -1,21 +1,20 @@ package graphdriver // import "github.com/docker/docker/daemon/graphdriver" import ( + "context" "io" "time" + "github.com/containerd/log" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" - "github.com/sirupsen/logrus" ) -var ( - // ApplyUncompressedLayer defines the unpack method used by the graph - // driver. - ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer -) +// ApplyUncompressedLayer defines the unpack method used by the graph +// driver. +var ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer // NaiveDiffDriver takes a ProtoDriver and adds the // capability of the Diffing methods on the local file system, @@ -23,7 +22,12 @@ var ( // on the exported NewNaiveDiffDriver function below. type NaiveDiffDriver struct { ProtoDriver - idMap idtools.IdentityMapping + IDMap idtools.IdentityMapping + // If true, allow ApplyDiff to succeed in spite of failures to set + // extended attributes on the unpacked files due to the destination + // filesystem not supporting them or a lack of permissions. The + // resulting unpacked layer may be subtly broken. 
+ BestEffortXattrs bool } // NewNaiveDiffDriver returns a fully functional driver that wraps the @@ -35,8 +39,10 @@ type NaiveDiffDriver struct { // ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) // DiffSize(id, parent string) (size int64, err error) func NewNaiveDiffDriver(driver ProtoDriver, idMap idtools.IdentityMapping) Driver { - return &NaiveDiffDriver{ProtoDriver: driver, - idMap: idMap} + return &NaiveDiffDriver{ + ProtoDriver: driver, + IDMap: idMap, + } } // Diff produces an archive of the changes between the specified @@ -80,7 +86,7 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err return nil, err } - archive, err := archive.ExportChanges(layerFs, changes, gdw.idMap) + archive, err := archive.ExportChanges(layerFs, changes, gdw.IDMap) if err != nil { return nil, err } @@ -136,13 +142,13 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size i defer driver.Put(id) layerFs := layerRootFs - options := &archive.TarOptions{IDMap: gdw.idMap} + options := &archive.TarOptions{IDMap: gdw.IDMap, BestEffortXattrs: gdw.BestEffortXattrs} start := time.Now().UTC() - logrus.WithField("id", id).Debug("Start untar layer") + log.G(context.TODO()).WithField("id", id).Debug("Start untar layer") if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { return } - logrus.WithField("id", id).Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + log.G(context.TODO()).WithField("id", id).Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) return } diff --git a/vendor/github.com/docker/docker/daemon/logger/adapter.go b/vendor/github.com/docker/docker/daemon/logger/adapter.go index 97d59be5e0..95ed5a859e 100644 --- a/vendor/github.com/docker/docker/daemon/logger/adapter.go +++ b/vendor/github.com/docker/docker/daemon/logger/adapter.go @@ -1,16 +1,17 @@ package logger // import "github.com/docker/docker/daemon/logger" import ( + "context" "io" "os" "path/filepath" "sync" "time" + "github.com/containerd/log" "github.com/docker/docker/api/types/plugins/logdriver" "github.com/docker/docker/pkg/plugingetter" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // pluginAdapter takes a plugin and implements the Logger interface for logger @@ -69,10 +70,10 @@ func (a *pluginAdapter) Close() error { } if err := a.stream.Close(); err != nil { - logrus.WithError(err).Error("error closing plugin fifo") + log.G(context.TODO()).WithError(err).Error("error closing plugin fifo") } if err := os.Remove(a.fifoPath); err != nil && !os.IsNotExist(err) { - logrus.WithError(err).Error("error cleaning up plugin fifo") + log.G(context.TODO()).WithError(err).Error("error cleaning up plugin fifo") } // may be nil, especially for unit tests diff --git a/vendor/github.com/docker/docker/daemon/logger/copier.go b/vendor/github.com/docker/docker/daemon/logger/copier.go index 30c68ea364..ac9639840e 100644 --- a/vendor/github.com/docker/docker/daemon/logger/copier.go +++ b/vendor/github.com/docker/docker/daemon/logger/copier.go @@ -2,13 +2,14 @@ package logger // import "github.com/docker/docker/daemon/logger" import ( "bytes" + "context" "io" "sync" "time" + "github.com/containerd/log" types "github.com/docker/docker/api/types/backend" "github.com/docker/docker/pkg/stringid" - "github.com/sirupsen/logrus" ) const ( @@ -87,7 +88,7 @@ func (c *Copier) copySrc(name string, src io.Reader) { if err != nil { if err != io.EOF { logReadsFailedCount.Inc(1) - logrus.Errorf("Error scanning log stream: %s", err) + 
log.G(context.TODO()).Errorf("Error scanning log stream: %s", err) return } eof = true diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go index ce861bdd8a..e3e2490a29 100644 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go @@ -57,7 +57,7 @@ func New(info logger.Info) (logger.Logger, error) { return nil, fmt.Errorf("max-size must be a positive number") } } - var maxFiles = 1 + maxFiles := 1 if maxFileString, ok := info.Config["max-file"]; ok { var err error maxFiles, err = strconv.Atoi(maxFileString) @@ -104,7 +104,7 @@ func New(info logger.Info) (logger.Logger, error) { } } - writer, err := loggerutils.NewLogFile(info.LogPath, capval, maxFiles, compress, decodeFunc, 0640, getTailReader) + writer, err := loggerutils.NewLogFile(info.LogPath, capval, maxFiles, compress, decodeFunc, 0o640, getTailReader) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go index 577c718f63..d7e2e37184 100644 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go @@ -20,7 +20,7 @@ type JSONLogs struct { // MarshalJSONBuf is an optimized JSON marshaller that avoids reflection // and unnecessary allocation. func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { - var first = true + first := true buf.WriteString(`{`) if len(mj.Log) != 0 { diff --git a/vendor/github.com/docker/docker/daemon/logger/local/local.go b/vendor/github.com/docker/docker/daemon/logger/local/local.go index e5ab8d74f6..252d79500d 100644 --- a/vendor/github.com/docker/docker/daemon/logger/local/local.go +++ b/vendor/github.com/docker/docker/daemon/logger/local/local.go @@ -138,7 +138,7 @@ func newDriver(logPath string, cfg *CreateConfig) (logger.Logger, error) { return nil, errdefs.InvalidParameter(err) } - lf, err := loggerutils.NewLogFile(logPath, cfg.MaxFileSize, cfg.MaxFileCount, !cfg.DisableCompression, decodeFunc, 0640, getTailReader) + lf, err := loggerutils.NewLogFile(logPath, cfg.MaxFileSize, cfg.MaxFileCount, !cfg.DisableCompression, decodeFunc, 0o640, getTailReader) if err != nil { return nil, err } @@ -187,7 +187,7 @@ func messageToProto(msg *logger.Message, proto *logdriver.LogEntry, partial *log func protoToMessage(proto *logdriver.LogEntry) *logger.Message { msg := &logger.Message{ Source: proto.Source, - Timestamp: time.Unix(0, proto.TimeNano), + Timestamp: time.Unix(0, proto.TimeNano).UTC(), } if proto.Partial { var md backend.PartialLogMetaData diff --git a/vendor/github.com/docker/docker/daemon/logger/logger_error.go b/vendor/github.com/docker/docker/daemon/logger/logger_error.go index 70f4311979..fb72aba442 100644 --- a/vendor/github.com/docker/docker/daemon/logger/logger_error.go +++ b/vendor/github.com/docker/docker/daemon/logger/logger_error.go @@ -1,7 +1,9 @@ package logger import ( - "github.com/sirupsen/logrus" + "context" + + "github.com/containerd/log" "golang.org/x/time/rate" ) @@ -16,7 +18,7 @@ var logErrorLimiter = rate.NewLimiter(333, 333) func logDriverError(loggerName, msgLine string, logErr error) { logWritesFailedCount.Inc(1) if logErrorLimiter.Allow() { - logrus.WithError(logErr). 
+ log.G(context.TODO()).WithError(logErr). WithField("driver", loggerName). WithField("message", msgLine). Errorf("Error writing log message") diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/cache/local_cache.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/cache/local_cache.go index c5e8fc2cac..d5adfd4ffa 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/cache/local_cache.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/cache/local_cache.go @@ -1,14 +1,15 @@ package cache // import "github.com/docker/docker/daemon/logger/loggerutils/cache" import ( + "context" "strconv" + "github.com/containerd/log" "github.com/docker/docker/api/types/container" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/local" units "github.com/docker/go-units" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const ( @@ -91,7 +92,7 @@ func (l *loggerWithCache) ReadLogs(config logger.ReadConfig) *logger.LogWatcher func (l *loggerWithCache) Close() error { err := l.l.Close() if err := l.cache.Close(); err != nil { - logrus.WithError(err).Warn("error while shutting cache logger") + log.G(context.TODO()).WithError(err).Warn("error while shutting cache logger") } return err } diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/file_unix.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/file_unix.go index 0deabefe1a..f505691c4d 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/file_unix.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/file_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package loggerutils diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go index 483e032d2c..106101937a 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go @@ -1,13 +1,14 @@ package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils" import ( + "context" "fmt" "io" "os" + "github.com/containerd/log" "github.com/docker/docker/daemon/logger" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type follow struct { @@ -16,13 +17,13 @@ type follow struct { Decoder Decoder Forwarder *forwarder - log *logrus.Entry + log *log.Entry c chan logPos } // Do follows the log file as it is written, starting from f at read. 
func (fl *follow) Do(f *os.File, read logPos) { - fl.log = logrus.WithFields(logrus.Fields{ + fl.log = log.G(context.TODO()).WithFields(log.Fields{ "module": "logger", "file": f.Name(), }) diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go index b37e93f875..572a3a7952 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go @@ -13,10 +13,10 @@ import ( "sync" "time" + "github.com/containerd/log" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/pkg/pools" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // rotateFileMetadata is a metadata of the gzip header of the compressed log file @@ -219,7 +219,7 @@ func (w *LogFile) rotate() (retErr error) { defer w.fsopMu.Unlock() if err := rotate(fname, w.maxFiles, w.compress); err != nil { - logrus.WithError(err).Warn("Error rotating log file, log data may have been lost") + log.G(context.TODO()).WithError(err).Warn("Error rotating log file, log data may have been lost") } else { // We may have readers working their way through the // current log file so we can't truncate it. We need to @@ -228,11 +228,11 @@ func (w *LogFile) rotate() (retErr error) { // current file out of the way. if w.maxFiles < 2 { if err := unlink(fname); err != nil && !errors.Is(err, fs.ErrNotExist) { - logrus.WithError(err).Error("Error unlinking current log file") + log.G(context.TODO()).WithError(err).Error("Error unlinking current log file") } } else { if err := os.Rename(fname, fname+".1"); err != nil && !errors.Is(err, fs.ErrNotExist) { - logrus.WithError(err).Error("Error renaming current log file") + log.G(context.TODO()).WithError(err).Error("Error renaming current log file") } } } @@ -262,7 +262,7 @@ func (w *LogFile) rotate() (retErr error) { // point during the compression process will a reader fail to // open a complete copy of the file. if err := compressFile(fname+".1", ts); err != nil { - logrus.WithError(err).Error("Error compressing log file after rotation") + log.G(context.TODO()).WithError(err).Error("Error compressing log file after rotation") } }() @@ -289,7 +289,7 @@ func rotate(name string, maxFiles int, compress bool) error { toPath := name + "." + strconv.Itoa(i) + extension fromPath := name + "." 
+ strconv.Itoa(i-1) + extension err := os.Rename(fromPath, toPath) - logrus.WithError(err).WithField("source", fromPath).WithField("target", toPath).Trace("Rotating log file") + log.G(context.TODO()).WithError(err).WithField("source", fromPath).WithField("target", toPath).Trace("Rotating log file") if err != nil && !errors.Is(err, fs.ErrNotExist) { return err } @@ -302,7 +302,7 @@ func compressFile(fileName string, lastTimestamp time.Time) (retErr error) { file, err := open(fileName) if err != nil { if errors.Is(err, fs.ErrNotExist) { - logrus.WithField("file", fileName).WithError(err).Debug("Could not open log file to compress") + log.G(context.TODO()).WithField("file", fileName).WithError(err).Debug("Could not open log file to compress") return nil } return errors.Wrap(err, "failed to open log file") @@ -317,7 +317,7 @@ func compressFile(fileName string, lastTimestamp time.Time) (retErr error) { } }() - outFile, err := openFile(fileName+".gz", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0640) + outFile, err := openFile(fileName+".gz", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0o640) if err != nil { return errors.Wrap(err, "failed to open or create gzip log file") } @@ -325,7 +325,7 @@ func compressFile(fileName string, lastTimestamp time.Time) (retErr error) { outFile.Close() if retErr != nil { if err := unlink(fileName + ".gz"); err != nil && !errors.Is(err, fs.ErrNotExist) { - logrus.WithError(err).Error("Error cleaning up after failed log compression") + log.G(context.TODO()).WithError(err).Error("Error cleaning up after failed log compression") } } }() @@ -339,7 +339,7 @@ func compressFile(fileName string, lastTimestamp time.Time) (retErr error) { compressWriter.Header.Extra, err = json.Marshal(&extra) if err != nil { // Here log the error only and don't return since this is just an optimization. - logrus.Warningf("Failed to marshal gzip header as JSON: %v", err) + log.G(context.TODO()).Warningf("Failed to marshal gzip header as JSON: %v", err) } _, err = pools.Copy(compressWriter, file) diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin.go b/vendor/github.com/docker/docker/daemon/logger/plugin.go index 354f08e108..424d7e101f 100644 --- a/vendor/github.com/docker/docker/daemon/logger/plugin.go +++ b/vendor/github.com/docker/docker/daemon/logger/plugin.go @@ -77,7 +77,7 @@ func makePluginCreator(name string, l logPlugin, scopePath func(s string) string unscopedPath := filepath.Join("/", "run", "docker", "logging") logRoot := scopePath(unscopedPath) - if err := os.MkdirAll(logRoot, 0700); err != nil { + if err := os.MkdirAll(logRoot, 0o700); err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go b/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go index a59fda860a..7a8c6aebd6 100644 --- a/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go +++ b/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package logger // import "github.com/docker/docker/daemon/logger" @@ -16,7 +15,7 @@ func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) { // Make sure to also open with read (in addition to write) to avoid borken pipe errors on plugin failure. // It is up to the plugin to keep track of pipes that it should re-attach to, however. // If the plugin doesn't open for reads, then the container will block once the pipe is full. 
- f, err := fifo.OpenFifo(context.Background(), a.fifoPath, unix.O_RDWR|unix.O_CREAT|unix.O_NONBLOCK, 0700) + f, err := fifo.OpenFifo(context.Background(), a.fifoPath, unix.O_RDWR|unix.O_CREAT|unix.O_NONBLOCK, 0o700) if err != nil { return nil, errors.Wrapf(err, "error creating i/o pipe for log plugin: %s", a.Name()) } diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go index fbbeba0c21..de2d4fb259 100644 --- a/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !freebsd -// +build !linux,!freebsd package logger // import "github.com/docker/docker/daemon/logger" diff --git a/vendor/github.com/docker/docker/daemon/logger/proxy.go b/vendor/github.com/docker/docker/daemon/logger/proxy.go index 4a1c778108..5cb03f95ee 100644 --- a/vendor/github.com/docker/docker/daemon/logger/proxy.go +++ b/vendor/github.com/docker/docker/daemon/logger/proxy.go @@ -74,9 +74,7 @@ type logPluginProxyCapabilitiesResponse struct { } func (pp *logPluginProxy) Capabilities() (cap Capability, err error) { - var ( - ret logPluginProxyCapabilitiesResponse - ) + var ret logPluginProxyCapabilitiesResponse if err = pp.Call("LogDriver.Capabilities", nil, &ret); err != nil { return @@ -97,9 +95,7 @@ type logPluginProxyReadLogsRequest struct { } func (pp *logPluginProxy) ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error) { - var ( - req logPluginProxyReadLogsRequest - ) + var req logPluginProxyReadLogsRequest req.Info = info req.Config = config diff --git a/vendor/github.com/docker/docker/daemon/network/settings.go b/vendor/github.com/docker/docker/daemon/network/settings.go index 39646c45ee..8236d16ac0 100644 --- a/vendor/github.com/docker/docker/daemon/network/settings.go +++ b/vendor/github.com/docker/docker/daemon/network/settings.go @@ -15,16 +15,15 @@ import ( type Settings struct { Bridge string SandboxID string + SandboxKey string HairpinMode bool LinkLocalIPv6Address string LinkLocalIPv6PrefixLen int Networks map[string]*EndpointSettings Service *clustertypes.ServiceConfig Ports nat.PortMap - SandboxKey string SecondaryIPAddresses []networktypes.Address SecondaryIPv6Addresses []networktypes.Address - IsAnonymousEndpoint bool HasSwarmEndpoint bool } @@ -34,6 +33,9 @@ type Settings struct { type EndpointSettings struct { *networktypes.EndpointSettings IPAMOperational bool + // DesiredMacAddress is the configured value, it's copied from MacAddress (the + // API param field) when the container is created. + DesiredMacAddress string } // AttachmentStore stores the load balancer IP address for a network id. 
diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go index 61e7456b4e..a5523c3e95 100644 --- a/vendor/github.com/docker/docker/errdefs/defs.go +++ b/vendor/github.com/docker/docker/errdefs/defs.go @@ -1,4 +1,4 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs // ErrNotFound signals that the requested object doesn't exist type ErrNotFound interface { diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go index fe06fb6f70..042de4b7b8 100644 --- a/vendor/github.com/docker/docker/errdefs/helpers.go +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -1,4 +1,4 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs import "context" diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go index 77bda389d1..ebcd789302 100644 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -1,4 +1,4 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs import ( "net/http" diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go index 3abf07d0c3..f94034cbd7 100644 --- a/vendor/github.com/docker/docker/errdefs/is.go +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -1,9 +1,18 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs + +import ( + "context" + "errors" +) type causer interface { Cause() error } +type wrapErr interface { + Unwrap() error +} + func getImplementer(err error) error { switch e := err.(type) { case @@ -23,6 +32,8 @@ func getImplementer(err error) error { return err case causer: return getImplementer(e.Cause()) + case wrapErr: + return getImplementer(e.Unwrap()) default: return err } @@ -105,3 +116,8 @@ func IsDataLoss(err error) bool { _, ok := getImplementer(err).(ErrDataLoss) return ok } + +// IsContext returns if the passed in error is due to context cancellation or deadline exceeded. 
+func IsContext(err error) bool { + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) +} diff --git a/vendor/github.com/docker/docker/image/fs.go b/vendor/github.com/docker/docker/image/fs.go index 57109efeee..83eb6e7775 100644 --- a/vendor/github.com/docker/docker/image/fs.go +++ b/vendor/github.com/docker/docker/image/fs.go @@ -1,15 +1,16 @@ package image // import "github.com/docker/docker/image" import ( + "context" "fmt" "os" "path/filepath" "sync" + "github.com/containerd/log" "github.com/docker/docker/pkg/ioutils" "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // DigestWalkFunc is function called by StoreBackend.Walk @@ -46,10 +47,10 @@ func newFSStore(root string) (*fs, error) { s := &fs{ root: root, } - if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { + if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0o700); err != nil { return nil, errors.Wrap(err, "failed to create storage backend") } - if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { + if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0o700); err != nil { return nil, errors.Wrap(err, "failed to create storage backend") } return s, nil @@ -75,7 +76,7 @@ func (s *fs) Walk(f DigestWalkFunc) error { for _, v := range dir { dgst := digest.NewDigestFromEncoded(digest.Canonical, v.Name()) if err := dgst.Validate(); err != nil { - logrus.Debugf("skipping invalid digest %s: %s", dgst, err) + log.G(context.TODO()).Debugf("skipping invalid digest %s: %s", dgst, err) continue } if err := f(dgst); err != nil { @@ -117,7 +118,7 @@ func (s *fs) Set(data []byte) (digest.Digest, error) { } dgst := digest.FromBytes(data) - if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil { + if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0o600); err != nil { return "", errors.Wrap(err, "failed to write digest data") } @@ -144,10 +145,10 @@ func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error { } baseDir := filepath.Join(s.metadataDir(dgst)) - if err := os.MkdirAll(baseDir, 0700); err != nil { + if err := os.MkdirAll(baseDir, 0o700); err != nil { return err } - return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600) + return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0o600) } // GetMetadata returns metadata for a given digest. diff --git a/vendor/github.com/docker/docker/image/image.go b/vendor/github.com/docker/docker/image/image.go index d17601323a..c955cbcb68 100644 --- a/vendor/github.com/docker/docker/image/image.go +++ b/vendor/github.com/docker/docker/image/image.go @@ -4,16 +4,16 @@ import ( "encoding/json" "errors" "io" - "reflect" "runtime" "strings" "time" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types/container" "github.com/docker/docker/dockerversion" "github.com/docker/docker/layer" "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // ID is the content-addressable ID of an image. 
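The errdefs hunks above add an IsContext helper and teach getImplementer to follow Unwrap chains. A small illustrative use, grounded only in the API introduced in this patch (the wrapping message is hypothetical):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/errdefs"
	"github.com/pkg/errors"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	// IsContext uses errors.Is, so it still reports true after the cause has
	// been wrapped; the new wrapErr case in getImplementer follows the same
	// Unwrap chain for the typed errdefs checks.
	err := errors.Wrap(ctx.Err(), "pulling layer")
	fmt.Println(errdefs.IsContext(err)) // true
}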
@@ -52,7 +52,7 @@ type V1Image struct { Comment string `json:"comment,omitempty"` // Created is the timestamp at which the image was created - Created time.Time `json:"created"` + Created *time.Time `json:"created"` // Container is the ID of the container that was used to create the image. // @@ -174,6 +174,17 @@ func (img *Image) OperatingSystem() string { return os } +// Platform generates an OCI platform from the image +func (img *Image) Platform() ocispec.Platform { + return ocispec.Platform{ + Architecture: img.Architecture, + OS: img.OS, + OSVersion: img.OSVersion, + OSFeatures: img.OSFeatures, + Variant: img.Variant, + } +} + // MarshalJSON serializes the image to JSON. It sorts the top-level keys so // that JSON that's been manipulated by a push/pull cycle with a legacy // registry won't end up with a different key order. @@ -258,44 +269,21 @@ func Clone(base *Image, id ID) *Image { } // History stores build commands that were used to create an image -type History struct { - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Author is the name of the author that was specified when committing the - // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. - Author string `json:"author,omitempty"` - // CreatedBy keeps the Dockerfile command used while building the image - CreatedBy string `json:"created_by,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // EmptyLayer is set to true if this history item did not generate a - // layer. Otherwise, the history item is associated with the next - // layer in the RootFS section. - EmptyLayer bool `json:"empty_layer,omitempty"` -} +type History = ocispec.History // NewHistory creates a new history struct from arguments, and sets the created // time to the current time in UTC func NewHistory(author, comment, createdBy string, isEmptyLayer bool) History { + now := time.Now().UTC() return History{ Author: author, - Created: time.Now().UTC(), + Created: &now, CreatedBy: createdBy, Comment: comment, EmptyLayer: isEmptyLayer, } } -// Equal compares two history structs for equality -func (h History) Equal(i History) bool { - if !h.Created.Equal(i.Created) { - return false - } - i.Created = h.Created - - return reflect.DeepEqual(h, i) -} - // Exporter provides interface for loading and saving images type Exporter interface { Load(io.ReadCloser, io.Writer, bool) error diff --git a/vendor/github.com/docker/docker/image/image_os.go b/vendor/github.com/docker/docker/image/image_os.go new file mode 100644 index 0000000000..ff4fe36d3a --- /dev/null +++ b/vendor/github.com/docker/docker/image/image_os.go @@ -0,0 +1,18 @@ +package image + +import ( + "errors" + "runtime" + "strings" + + "github.com/docker/docker/errdefs" +) + +// CheckOS checks if the given OS matches the host's platform, and +// returns an error otherwise. 
+func CheckOS(os string) error { + if !strings.EqualFold(runtime.GOOS, os) { + return errdefs.InvalidParameter(errors.New("operating system is not supported")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/image/rootfs.go b/vendor/github.com/docker/docker/image/rootfs.go index f73a0660fa..efccd97eb2 100644 --- a/vendor/github.com/docker/docker/image/rootfs.go +++ b/vendor/github.com/docker/docker/image/rootfs.go @@ -1,10 +1,11 @@ package image // import "github.com/docker/docker/image" import ( + "context" "runtime" + "github.com/containerd/log" "github.com/docker/docker/layer" - "github.com/sirupsen/logrus" ) // TypeLayers is used for RootFS.Type for filesystems organized into layers. @@ -46,7 +47,7 @@ func (r *RootFS) Clone() *RootFS { // ChainID returns the ChainID for the top layer in RootFS. func (r *RootFS) ChainID() layer.ChainID { if runtime.GOOS == "windows" && r.Type == typeLayersWithBase { - logrus.Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs) + log.G(context.TODO()).Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs) return "" } return layer.CreateChainID(r.DiffIDs) diff --git a/vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go b/vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go new file mode 100644 index 0000000000..1672617635 --- /dev/null +++ b/vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go @@ -0,0 +1,54 @@ +package v1 + +import ( + "time" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +const DockerOCIImageMediaType = "application/vnd.docker.container.image.v1+json" + +// DockerOCIImage is a ocispec.Image extended with Docker specific Config. +type DockerOCIImage struct { + ocispec.Image + + // Shadow ocispec.Image.Config + Config DockerOCIImageConfig `json:"config,omitempty"` +} + +// DockerOCIImageConfig is a ocispec.ImageConfig extended with Docker specific fields. +type DockerOCIImageConfig struct { + ocispec.ImageConfig + + DockerOCIImageConfigExt +} + +// DockerOCIImageConfigExt contains Docker-specific fields in DockerImageConfig. +type DockerOCIImageConfigExt struct { + Healthcheck *HealthcheckConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + + OnBuild []string `json:",omitempty"` // ONBUILD metadata that were defined on the image Dockerfile + Shell []string `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} + +// HealthcheckConfig holds configuration settings for the HEALTHCHECK feature. +type HealthcheckConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. 
+ StartInterval time.Duration `json:",omitempty"` // The interval to attempt healthchecks at during the start period + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/image/store.go b/vendor/github.com/docker/docker/image/store.go index c457bc3a82..3671436125 100644 --- a/vendor/github.com/docker/docker/image/store.go +++ b/vendor/github.com/docker/docker/image/store.go @@ -1,17 +1,18 @@ package image // import "github.com/docker/docker/image" import ( + "context" "fmt" + "os" "sync" "time" + "github.com/containerd/log" "github.com/docker/docker/errdefs" "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/system" "github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest/digestset" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // Store is an interface for creating and accessing images @@ -24,6 +25,8 @@ type Store interface { GetParent(id ID) (ID, error) SetLastUpdated(id ID) error GetLastUpdated(id ID) (time.Time, error) + SetBuiltLocally(id ID) error + IsBuiltLocally(id ID) (bool, error) Children(id ID) []ID Map() map[ID]*Image Heads() map[ID]*Image @@ -68,28 +71,28 @@ func NewImageStore(fs StoreBackend, lss LayerGetReleaser) (Store, error) { func (is *store) restore() error { // As the code below is run when restoring all images (which can be "many"), - // constructing the "logrus.WithFields" is deliberately not "DRY", as the + // constructing the "log.G(ctx).WithFields" is deliberately not "DRY", as the // logger is only used for error-cases, and we don't want to do allocations // if we don't need it. The "f" type alias is here is just for convenience, // and to make the code _slightly_ more DRY. 
See the discussion on GitHub; // https://github.com/moby/moby/pull/44426#discussion_r1059519071 - type f = logrus.Fields + type f = log.Fields err := is.fs.Walk(func(dgst digest.Digest) error { img, err := is.Get(ID(dgst)) if err != nil { - logrus.WithFields(f{"digest": dgst, "err": err}).Error("invalid image") + log.G(context.TODO()).WithFields(f{"digest": dgst, "err": err}).Error("invalid image") return nil } var l layer.Layer if chainID := img.RootFS.ChainID(); chainID != "" { - if !system.IsOSSupported(img.OperatingSystem()) { - logrus.WithFields(f{"chainID": chainID, "os": img.OperatingSystem()}).Error("not restoring image with unsupported operating system") + if err := CheckOS(img.OperatingSystem()); err != nil { + log.G(context.TODO()).WithFields(f{"chainID": chainID, "os": img.OperatingSystem()}).Error("not restoring image with unsupported operating system") return nil } l, err = is.lss.Get(chainID) if err != nil { if errors.Is(err, layer.ErrLayerDoesNotExist) { - logrus.WithFields(f{"chainID": chainID, "os": img.OperatingSystem(), "err": err}).Error("not restoring image") + log.G(context.TODO()).WithFields(f{"chainID": chainID, "os": img.OperatingSystem(), "err": err}).Error("not restoring image") return nil } return err @@ -163,8 +166,8 @@ func (is *store) Create(config []byte) (ID, error) { var l layer.Layer if layerID != "" { - if !system.IsOSSupported(img.OperatingSystem()) { - return "", errdefs.InvalidParameter(system.ErrNotSupportedOperatingSystem) + if err := CheckOS(img.OperatingSystem()); err != nil { + return "", err } l, err = is.lss.Get(layerID) if err != nil { @@ -246,7 +249,7 @@ func (is *store) Delete(id ID) ([]layer.Metadata, error) { } if err := is.digestSet.Remove(id.Digest()); err != nil { - logrus.Errorf("error removing %s from digest set: %q", id, err) + log.G(context.TODO()).Errorf("error removing %s from digest set: %q", id, err) } delete(is.images, id) is.fs.Delete(id.Digest()) @@ -295,6 +298,23 @@ func (is *store) GetLastUpdated(id ID) (time.Time, error) { return time.Parse(time.RFC3339Nano, string(bytes)) } +// SetBuiltLocally sets whether image can be used as a builder cache +func (is *store) SetBuiltLocally(id ID) error { + return is.fs.SetMetadata(id.Digest(), "builtLocally", []byte{1}) +} + +// IsBuiltLocally returns whether image can be used as a builder cache +func (is *store) IsBuiltLocally(id ID) (bool, error) { + bytes, err := is.fs.GetMetadata(id.Digest(), "builtLocally") + if err != nil || len(bytes) == 0 { + if errors.Is(err, os.ErrNotExist) { + err = nil + } + return false, err + } + return bytes[0] == 1, nil +} + func (is *store) Children(id ID) []ID { is.RLock() defer is.RUnlock() @@ -332,7 +352,7 @@ func (is *store) imagesMap(all bool) map[ID]*Image { } img, err := is.Get(id) if err != nil { - logrus.Errorf("invalid image access: %q, error: %q", id, err) + log.G(context.TODO()).Errorf("invalid image access: %q, error: %q", id, err) continue } images[id] = img diff --git a/vendor/github.com/docker/docker/internal/multierror/multierror.go b/vendor/github.com/docker/docker/internal/multierror/multierror.go new file mode 100644 index 0000000000..cf4d6a5957 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/multierror/multierror.go @@ -0,0 +1,46 @@ +package multierror + +import ( + "strings" +) + +// Join is a drop-in replacement for errors.Join with better formatting. 
+func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + if len(e.errs) == 1 { + return strings.TrimSpace(e.errs[0].Error()) + } + stringErrs := make([]string, 0, len(e.errs)) + for _, subErr := range e.errs { + stringErrs = append(stringErrs, strings.Replace(subErr.Error(), "\n", "\n\t", -1)) + } + return "* " + strings.Join(stringErrs, "\n* ") +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/vendor/github.com/docker/docker/internal/unshare/unshare_linux.go b/vendor/github.com/docker/docker/internal/unshare/unshare_linux.go index 14bbec4871..0b210e8193 100644 --- a/vendor/github.com/docker/docker/internal/unshare/unshare_linux.go +++ b/vendor/github.com/docker/docker/internal/unshare/unshare_linux.go @@ -1,5 +1,4 @@ //go:build go1.10 -// +build go1.10 package unshare // import "github.com/docker/docker/internal/unshare" diff --git a/vendor/github.com/docker/docker/layer/filestore.go b/vendor/github.com/docker/docker/layer/filestore.go index f2465b3b04..96ede8711b 100644 --- a/vendor/github.com/docker/docker/layer/filestore.go +++ b/vendor/github.com/docker/docker/layer/filestore.go @@ -2,6 +2,7 @@ package layer // import "github.com/docker/docker/layer" import ( "compress/gzip" + "context" "encoding/json" "io" "os" @@ -10,11 +11,11 @@ import ( "strconv" "strings" + "github.com/containerd/log" "github.com/docker/distribution" "github.com/docker/docker/pkg/ioutils" "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) var ( @@ -322,7 +323,7 @@ func (fms *fileMetadataStore) getOrphan() ([]roLayer, error) { nameSplit := strings.Split(fi.Name(), "-") dgst := digest.NewDigestFromEncoded(algorithm, nameSplit[0]) if err := dgst.Validate(); err != nil { - logrus.WithError(err).WithField("digest", string(algorithm)+":"+nameSplit[0]).Debug("ignoring invalid digest") + log.G(context.TODO()).WithError(err).WithField("digest", string(algorithm)+":"+nameSplit[0]).Debug("ignoring invalid digest") continue } @@ -330,13 +331,13 @@ func (fms *fileMetadataStore) getOrphan() ([]roLayer, error) { contentBytes, err := os.ReadFile(chainFile) if err != nil { if !os.IsNotExist(err) { - logrus.WithError(err).WithField("digest", dgst).Error("failed to read cache ID") + log.G(context.TODO()).WithError(err).WithField("digest", dgst).Error("failed to read cache ID") } continue } cacheID := strings.TrimSpace(string(contentBytes)) if cacheID == "" { - logrus.Error("invalid cache ID") + log.G(context.TODO()).Error("invalid cache ID") continue } @@ -366,7 +367,7 @@ func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { if fi.IsDir() && fi.Name() != "mounts" { dgst := digest.NewDigestFromEncoded(algorithm, fi.Name()) if err := dgst.Validate(); err != nil { - logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) + log.G(context.TODO()).Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) } else { ids = append(ids, ChainID(dgst)) } @@ -410,17 +411,17 @@ func (fms *fileMetadataStore) Remove(layer ChainID, cache string) error { chainFile := filepath.Join(dir, "cache-id") contentBytes, err := os.ReadFile(chainFile) if err != nil { - logrus.WithError(err).WithField("file", chainFile).Error("cannot get cache ID") + 
log.G(context.TODO()).WithError(err).WithField("file", chainFile).Error("cannot get cache ID") continue } cacheID := strings.TrimSpace(string(contentBytes)) if cacheID != cache { continue } - logrus.Debugf("Removing folder: %s", dir) + log.G(context.TODO()).Debugf("Removing folder: %s", dir) err = os.RemoveAll(dir) if err != nil && !os.IsNotExist(err) { - logrus.WithError(err).WithField("name", f.Name()).Error("cannot remove layer") + log.G(context.TODO()).WithError(err).WithField("name", f.Name()).Error("cannot remove layer") continue } } diff --git a/vendor/github.com/docker/docker/layer/layer.go b/vendor/github.com/docker/docker/layer/layer.go index 330f359632..28ad0fc9c7 100644 --- a/vendor/github.com/docker/docker/layer/layer.go +++ b/vendor/github.com/docker/docker/layer/layer.go @@ -10,13 +10,14 @@ package layer // import "github.com/docker/docker/layer" import ( + "context" "errors" "io" + "github.com/containerd/log" "github.com/docker/distribution" "github.com/docker/docker/pkg/archive" "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" ) var ( @@ -173,12 +174,10 @@ type Store interface { Get(ChainID) (Layer, error) Map() map[ChainID]Layer Release(Layer) ([]Metadata, error) - CreateRWLayer(id string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) GetRWLayer(id string) (RWLayer, error) GetMountID(id string) (string, error) ReleaseRWLayer(RWLayer) ([]Metadata, error) - Cleanup() error DriverStatus() [][2]string DriverName() string @@ -212,7 +211,7 @@ func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { func ReleaseAndLog(ls Store, l Layer) { metadata, err := ls.Release(l) if err != nil { - logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) + log.G(context.TODO()).Errorf("Error releasing layer %s: %v", l.ChainID(), err) } LogReleaseMetadata(metadata) } @@ -221,6 +220,6 @@ func ReleaseAndLog(ls Store, l Layer) { // ensure consistent logging for release metadata func LogReleaseMetadata(metadatas []Metadata) { for _, metadata := range metadatas { - logrus.Infof("Layer %s cleaned up", metadata.ChainID) + log.G(context.TODO()).Infof("Layer %s cleaned up", metadata.ChainID) } } diff --git a/vendor/github.com/docker/docker/layer/layer_store.go b/vendor/github.com/docker/docker/layer/layer_store.go index 221c1df7ad..6c010070c3 100644 --- a/vendor/github.com/docker/docker/layer/layer_store.go +++ b/vendor/github.com/docker/docker/layer/layer_store.go @@ -1,6 +1,7 @@ package layer // import "github.com/docker/docker/layer" import ( + "context" "errors" "fmt" "io" @@ -8,6 +9,7 @@ import ( "path/filepath" "sync" + "github.com/containerd/log" "github.com/docker/distribution" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/idtools" @@ -15,7 +17,6 @@ import ( "github.com/docker/docker/pkg/stringid" "github.com/moby/locker" "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" ) @@ -67,7 +68,7 @@ func NewStoreFromOptions(options StoreOptions) (Store, error) { } return nil, fmt.Errorf("error initializing graphdriver: %v", err) } - logrus.Debugf("Initialized graph driver %s", driver) + log.G(context.TODO()).Debugf("Initialized graph driver %s", driver) root := fmt.Sprintf(options.MetadataStorePathTemplate, driver) @@ -105,7 +106,7 @@ func newStoreFromGraphDriver(root string, driver graphdriver.Driver) (Store, err for _, id := range ids { l, err := ls.loadLayer(id) if err != nil { - logrus.Debugf("Failed to load layer %s: 
%s", id, err) + log.G(context.TODO()).Debugf("Failed to load layer %s: %s", id, err) continue } if l.parent != nil { @@ -115,7 +116,7 @@ func newStoreFromGraphDriver(root string, driver graphdriver.Driver) (Store, err for _, mount := range mounts { if err := ls.loadMount(mount); err != nil { - logrus.Debugf("Failed to load mount %s: %s", mount, err) + log.G(context.TODO()).Debugf("Failed to load mount %s: %s", mount, err) } } @@ -258,7 +259,7 @@ func (ls *layerStore) applyTar(tx *fileMetadataTransaction, ts io.Reader, parent layer.size = applySize layer.diffID = DiffID(digester.Digest()) - logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) + log.G(context.TODO()).Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) return nil } @@ -318,12 +319,12 @@ func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descr defer func() { if cErr != nil { - logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, cErr) + log.G(context.TODO()).Debugf("Cleaning up layer %s: %v", layer.cacheID, cErr) if err := ls.driver.Remove(layer.cacheID); err != nil { - logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) + log.G(context.TODO()).Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) } if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + log.G(context.TODO()).Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) } } }() @@ -576,7 +577,7 @@ func (ls *layerStore) GetMountID(id string) (string, error) { if mount == nil { return "", ErrMountDoesNotExist } - logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) + log.G(context.TODO()).Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) return mount.mountID, nil } @@ -602,21 +603,21 @@ func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { } if err := ls.driver.Remove(m.mountID); err != nil { - logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) + log.G(context.TODO()).Errorf("Error removing mounted layer %s: %s", m.name, err) m.retakeReference(l) return nil, err } if m.initID != "" { if err := ls.driver.Remove(m.initID); err != nil { - logrus.Errorf("Error removing init layer %s: %s", m.name, err) + log.G(context.TODO()).Errorf("Error removing init layer %s: %s", m.name, err) m.retakeReference(l) return nil, err } } if err := ls.store.RemoveMount(m.name); err != nil { - logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) + log.G(context.TODO()).Errorf("Error removing mount metadata: %s: %s", m.name, err) m.retakeReference(l) return nil, err } @@ -735,28 +736,28 @@ func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size metaUnpacker := storage.NewJSONUnpacker(metadata) upackerCounter := &unpackSizeCounter{metaUnpacker, size} - logrus.Debugf("Assembling tar data for %s", graphID) + log.G(context.TODO()).Debugf("Assembling tar data for %s", graphID) return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) } func (ls *layerStore) Cleanup() error { orphanLayers, err := ls.store.getOrphan() if err != nil { - logrus.WithError(err).Error("cannot get orphan layers") + log.G(context.TODO()).WithError(err).Error("cannot get orphan layers") } if len(orphanLayers) > 0 { - logrus.Debugf("found %v orphan layers", len(orphanLayers)) + log.G(context.TODO()).Debugf("found %v orphan layers", len(orphanLayers)) } for _, orphan := range orphanLayers { - 
logrus.WithField("cache-id", orphan.cacheID).Debugf("removing orphan layer, chain ID: %v", orphan.chainID) + log.G(context.TODO()).WithField("cache-id", orphan.cacheID).Debugf("removing orphan layer, chain ID: %v", orphan.chainID) err = ls.driver.Remove(orphan.cacheID) if err != nil && !os.IsNotExist(err) { - logrus.WithError(err).WithField("cache-id", orphan.cacheID).Error("cannot remove orphan layer") + log.G(context.TODO()).WithError(err).WithField("cache-id", orphan.cacheID).Error("cannot remove orphan layer") continue } err = ls.store.Remove(orphan.chainID, orphan.cacheID) if err != nil { - logrus.WithError(err).WithField("chain-id", orphan.chainID).Error("cannot remove orphan layer metadata") + log.G(context.TODO()).WithError(err).WithField("chain-id", orphan.chainID).Error("cannot remove orphan layer metadata") } } return ls.driver.Cleanup() diff --git a/vendor/github.com/docker/docker/layer/layer_unix.go b/vendor/github.com/docker/docker/layer/layer_unix.go index 24cb880092..989e9ddcf0 100644 --- a/vendor/github.com/docker/docker/layer/layer_unix.go +++ b/vendor/github.com/docker/docker/layer/layer_unix.go @@ -1,5 +1,4 @@ //go:build linux || freebsd || darwin || openbsd -// +build linux freebsd darwin openbsd package layer // import "github.com/docker/docker/layer" diff --git a/vendor/github.com/docker/docker/layer/migration.go b/vendor/github.com/docker/docker/layer/migration.go index 0d97c6eca2..e54b60a43b 100644 --- a/vendor/github.com/docker/docker/layer/migration.go +++ b/vendor/github.com/docker/docker/layer/migration.go @@ -2,55 +2,18 @@ package layer // import "github.com/docker/docker/layer" import ( "compress/gzip" + "context" "errors" "io" "os" + "github.com/containerd/log" "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" ) -func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { - defer func() { - if err != nil { - diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) - } - }() - - if oldTarDataPath == "" { - err = errors.New("no tar-split file") - return - } - - tarDataFile, err := os.Open(oldTarDataPath) - if err != nil { - return - } - defer tarDataFile.Close() - uncompressed, err := gzip.NewReader(tarDataFile) - if err != nil { - return - } - - dgst := digest.Canonical.Digester() - err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) - if err != nil { - return - } - - diffID = DiffID(dgst.Digest()) - err = os.RemoveAll(newTarDataPath) - if err != nil { - return - } - err = os.Link(oldTarDataPath, newTarDataPath) - - return -} - -func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { +func (ls *layerStore) ChecksumForGraphID(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { rawarchive, err := ls.driver.Diff(id, parent) if err != nil { return @@ -132,9 +95,9 @@ func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID D defer func() { if err != nil { - logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) + log.G(context.TODO()).Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + log.G(context.TODO()).Errorf("Error canceling metadata transaction %q: %s", 
tx.String(), err) } } }() diff --git a/vendor/github.com/docker/docker/libcontainerd/types/types.go b/vendor/github.com/docker/docker/libcontainerd/types/types.go index 0b484e23f0..f05eb4fc90 100644 --- a/vendor/github.com/docker/docker/libcontainerd/types/types.go +++ b/vendor/github.com/docker/docker/libcontainerd/types/types.go @@ -64,7 +64,7 @@ type Client interface { // Container provides access to a containerd container. type Container interface { - Start(ctx context.Context, checkpointDir string, withStdin bool, attachStdio StdioCallback) (Task, error) + NewTask(ctx context.Context, checkpointDir string, withStdin bool, attachStdio StdioCallback) (Task, error) Task(ctx context.Context) (Task, error) // AttachTask returns the current task for the container and reattaches // to the IO for the running task. If no task exists for the container @@ -79,6 +79,8 @@ type Container interface { // Task provides access to a running containerd container. type Task interface { Process + // Start begins execution of the task + Start(context.Context) error // Pause suspends the execution of the task Pause(context.Context) error // Resume the execution of the task diff --git a/vendor/github.com/docker/docker/oci/caps/utils_linux.go b/vendor/github.com/docker/docker/oci/caps/utils_linux.go index 06dc3410fc..ffb301cf81 100644 --- a/vendor/github.com/docker/docker/oci/caps/utils_linux.go +++ b/vendor/github.com/docker/docker/oci/caps/utils_linux.go @@ -1,9 +1,10 @@ package caps // import "github.com/docker/docker/oci/caps" import ( + "context" "sync" ccaps "github.com/containerd/containerd/pkg/cap" - "github.com/sirupsen/logrus" + "github.com/containerd/log" ) var initCapsOnce sync.Once @@ -13,7 +14,7 @@ func initCaps() { rawCaps := ccaps.Known() curCaps, err := ccaps.Current() if err != nil { - logrus.WithError(err).Error("failed to get capabilities from current environment") + log.G(context.TODO()).WithError(err).Error("failed to get capabilities from current environment") allCaps = rawCaps } else { allCaps = curCaps diff --git a/vendor/github.com/docker/docker/oci/caps/utils_other.go b/vendor/github.com/docker/docker/oci/caps/utils_other.go index 5634a65720..03c3d9df9f 100644 --- a/vendor/github.com/docker/docker/oci/caps/utils_other.go +++ b/vendor/github.com/docker/docker/oci/caps/utils_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package caps // import "github.com/docker/docker/oci/caps" diff --git a/vendor/github.com/docker/docker/oci/defaults.go b/vendor/github.com/docker/docker/oci/defaults.go index 0dc46543fb..c3dae8b109 100644 --- a/vendor/github.com/docker/docker/oci/defaults.go +++ b/vendor/github.com/docker/docker/oci/defaults.go @@ -123,11 +123,11 @@ func DefaultLinuxSpec() specs.Spec { "/proc/sysrq-trigger", }, Namespaces: []specs.LinuxNamespace{ - {Type: "mount"}, - {Type: "network"}, - {Type: "uts"}, - {Type: "pid"}, - {Type: "ipc"}, + {Type: specs.MountNamespace}, + {Type: specs.NetworkNamespace}, + {Type: specs.UTSNamespace}, + {Type: specs.PIDNamespace}, + {Type: specs.IPCNamespace}, }, // Devices implicitly contains the following devices: // null, zero, full, random, urandom, tty, console, and ptmx. 
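The oci/defaults.go hunk above replaces namespace string literals with the typed constants from the runtime-spec package. A minimal sketch of the same idea, assuming only the public opencontainers/runtime-spec/specs-go API (not part of this patch):

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// defaultNamespaces builds the same list as DefaultLinuxSpec. The typed
// constants marshal to the same strings ("mount", "network", ...) but let the
// compiler catch typos that a raw string literal would not.
func defaultNamespaces() []specs.LinuxNamespace {
	return []specs.LinuxNamespace{
		{Type: specs.MountNamespace},
		{Type: specs.NetworkNamespace},
		{Type: specs.UTSNamespace},
		{Type: specs.PIDNamespace},
		{Type: specs.IPCNamespace},
	}
}

func main() {
	for _, ns := range defaultNamespaces() {
		fmt.Println(ns.Type)
	}
}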
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index 34361a24ac..43133a0950 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -21,6 +21,7 @@ import ( "time" "github.com/containerd/containerd/pkg/userns" + "github.com/containerd/log" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" @@ -29,7 +30,6 @@ import ( "github.com/moby/patternmatcher" "github.com/moby/sys/sequential" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a @@ -42,7 +42,7 @@ import ( // This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is // subject to change in Moby at any time -- image authors who require consistent or known directory permissions // should explicitly control them by ensuring that header entries exist for any applicable path. -const ImpliedDirectoryMode = 0755 +const ImpliedDirectoryMode = 0o755 type ( // Compression is the state represents if compressed or not. @@ -70,6 +70,12 @@ type ( // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool + // Allow unpacking to succeed in spite of failures to set extended + // attributes on the unpacked files due to the destination filesystem + // not supporting them or a lack of permissions. Extended attributes + // were probably in the archive for a reason, so set this option at + // your own peril. + BestEffortXattrs bool } ) @@ -199,21 +205,21 @@ func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { - logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") + log.G(ctx).WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { - logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) + log.G(ctx).Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { - logrus.Debugf("unpigz binary not found, falling back to go gzip library") + log.G(ctx).Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } - logrus.Debugf("Using %s to decompress", unpigzPath) + log.G(ctx).Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } @@ -475,6 +481,8 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro return hdr, nil } +const paxSchilyXattr = "SCHILY.xattr." + // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { @@ -487,15 +495,16 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { - length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. 
capability[versionOffset] = vfsCapRevision2 - length = xattrCapsSz2 + capability = capability[:xattrCapsSz2] + } + if hdr.PAXRecords == nil { + hdr.PAXRecords = make(map[string]string) } - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability[:length]) + hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability) } return nil } @@ -666,7 +675,19 @@ func (ta *tarAppender) addTarFile(path, name string) error { return nil } -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, opts *TarOptions) error { + var ( + Lchown = true + inUserns, bestEffortXattrs bool + chownOpts *idtools.Identity + ) + if opts != nil { + Lchown = !opts.NoLchown + inUserns = opts.InUserNS + chownOpts = opts.ChownOpts + bestEffortXattrs = opts.BestEffortXattrs + } + // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. setuid bits) @@ -736,7 +757,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeXGlobalHeader: - logrus.Debug("PAX Global Extended Headers found and ignored") + log.G(context.TODO()).Debug("PAX Global Extended Headers found and ignored") return nil default: @@ -757,26 +778,26 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } } - var errors []string - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - if err == syscall.ENOTSUP || err == syscall.EPERM { - // We ignore errors here because not all graphdrivers support - // xattrs *cough* old versions of AUFS *cough*. However only - // ENOTSUP should be emitted in that case, otherwise we still - // bail. + var xattrErrs []string + for key, value := range hdr.PAXRecords { + xattr, ok := strings.CutPrefix(key, paxSchilyXattr) + if !ok { + continue + } + if err := system.Lsetxattr(path, xattr, []byte(value), 0); err != nil { + if bestEffortXattrs && errors.Is(err, syscall.ENOTSUP) || errors.Is(err, syscall.EPERM) { // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). - errors = append(errors, err.Error()) + xattrErrs = append(xattrErrs, err.Error()) continue } return err } } - if len(errors) > 0 { - logrus.WithFields(logrus.Fields{ - "errors": errors, + if len(xattrErrs) > 0 { + log.G(context.TODO()).WithFields(log.Fields{ + "errors": xattrErrs, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } @@ -893,13 +914,13 @@ func (t *Tarballer) Do() { defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { - logrus.Errorf("Can't close tar writer: %s", err) + log.G(context.TODO()).Errorf("Can't close tar writer: %s", err) } if err := t.compressWriter.Close(); err != nil { - logrus.Errorf("Can't close compress writer: %s", err) + log.G(context.TODO()).Errorf("Can't close compress writer: %s", err) } if err := t.pipeWriter.Close(); err != nil { - logrus.Errorf("Can't close pipe writer: %s", err) + log.G(context.TODO()).Errorf("Can't close pipe writer: %s", err) } }() @@ -922,7 +943,7 @@ func (t *Tarballer) Do() { // directory. So, we must split the source path and use the // basename as the include. 
if len(t.options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") + log.G(context.TODO()).Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(t.srcPath) @@ -947,7 +968,7 @@ func (t *Tarballer) Do() { walkRoot := getWalkRoot(t.srcPath, include) filepath.WalkDir(walkRoot, func(filePath string, f os.DirEntry, err error) error { if err != nil { - logrus.Errorf("Tar: Can't stat file %s to tar: %s", t.srcPath, err) + log.G(context.TODO()).Errorf("Tar: Can't stat file %s to tar: %s", t.srcPath, err) return nil } @@ -986,7 +1007,7 @@ func (t *Tarballer) Do() { skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{}) } if err != nil { - logrus.Errorf("Error matching %s: %v", relFilePath, err) + log.G(context.TODO()).Errorf("Error matching %s: %v", relFilePath, err) return err } @@ -1047,7 +1068,7 @@ func (t *Tarballer) Do() { } if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + log.G(context.TODO()).Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err @@ -1084,7 +1105,7 @@ loop: // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { - logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) + log.G(context.TODO()).Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } @@ -1158,7 +1179,7 @@ loop: } } - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + if err := createTarFile(path, dest, hdr, trBuf, options); err != nil { return err } @@ -1297,7 +1318,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it - if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) @@ -1322,7 +1343,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + if err := system.MkdirAll(filepath.Dir(dst), 0o700); err != nil { return err } diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go index 76321a35e3..2c3786cd50 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -21,8 +21,7 @@ func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConv return nil, nil } -type overlayWhiteoutConverter struct { -} +type overlayWhiteoutConverter struct{} func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { // convert whiteouts to AUFS format @@ -30,7 +29,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os // we just rename the file and make it normal dir, filename := filepath.Split(hdr.Name) hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 + hdr.Mode = 0o600 hdr.Typeflag = tar.TypeReg hdr.Size = 0 } @@ -42,9 +41,7 @@ func (overlayWhiteoutConverter) 
ConvertWrite(hdr *tar.Header, path string, fi os return nil, err } if len(opaque) == 1 && opaque[0] == 'y' { - if hdr.Xattrs != nil { - delete(hdr.Xattrs, "trusted.overlay.opaque") - } + delete(hdr.PAXRecords, paxSchilyXattr+"trusted.overlay.opaque") // create a header for the whiteout file // it should inherit some properties from the parent, but be a regular file diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go index 28ae2769c5..3de1d64c80 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go index 92d8e23dd0..ff59d01975 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive // import "github.com/docker/docker/pkg/archive" @@ -8,6 +7,7 @@ import ( "errors" "os" "path/filepath" + "runtime" "strings" "syscall" @@ -44,6 +44,20 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { // statUnix populates hdr from system-dependent fields of fi without performing // any OS lookups. func statUnix(fi os.FileInfo, hdr *tar.Header) error { + // Devmajor and Devminor are only needed for special devices. + + // In FreeBSD, RDev for regular files is -1 (unless overridden by FS): + // https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531 + // (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241). + + // ZFS in particular does not override the default: + // https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027 + + // Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1). + // Such large values cannot be encoded in a tar header. + if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar { + return nil + } s, ok := fi.Sys().(*syscall.Stat_t) if !ok { return nil @@ -83,7 +97,7 @@ func getFileUIDGID(stat interface{}) (idtools.Identity, error) { // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - mode := uint32(hdr.Mode & 07777) + mode := uint32(hdr.Mode & 0o7777) switch hdr.Typeflag { case tar.TypeBlock: mode |= unix.S_IFBLK diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go index 7f7242be50..f9f16c9259 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -3,6 +3,7 @@ package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" + "context" "fmt" "io" "os" @@ -12,10 +13,10 @@ import ( "syscall" "time" + "github.com/containerd/log" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" ) // ChangeType represents the change type. 
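The archive hunks above move extended attributes off the deprecated tar.Header.Xattrs field and onto PAXRecords keyed with the "SCHILY.xattr." prefix. A standard-library-only sketch of what that encoding round-trip looks like (file name and xattr value are hypothetical, not part of this patch):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"strings"
)

const paxSchilyXattr = "SCHILY.xattr."

func main() {
	// Writing: each xattr becomes a PAX record whose key carries the
	// SCHILY.xattr. prefix; the deprecated Header.Xattrs field stays unset.
	hdr := &tar.Header{
		Typeflag: tar.TypeReg,
		Name:     "etc/app.conf",
		Mode:     0o644,
		PAXRecords: map[string]string{
			paxSchilyXattr + "user.checksum": "sha256:deadbeef", // hypothetical value
		},
	}

	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(hdr); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	// Reading: strip the prefix again to recover the xattr name, as
	// createTarFile does above with strings.CutPrefix.
	tr := tar.NewReader(&buf)
	got, err := tr.Next()
	if err != nil {
		panic(err)
	}
	for key, value := range got.PAXRecords {
		if name, ok := strings.CutPrefix(key, paxSchilyXattr); ok {
			fmt.Printf("xattr %s = %s\n", name, value)
		}
	}
}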
@@ -107,8 +108,10 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { return "", nil } -type skipChange func(string) (bool, error) -type deleteChange func(string, string, os.FileInfo) (string, error) +type ( + skipChange func(string) (bool, error) + deleteChange func(string, string, os.FileInfo) (string, error) +) func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { var ( @@ -341,9 +344,7 @@ func newRootFileInfo() *FileInfo { // ChangesDirs compares two directories and generates an array of Change objects describing the changes. // If oldDir is "", then all files in newDir will be Add-Changes. func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - ) + var oldRoot, newRoot *FileInfo if oldDir == "" { emptyDir, err := os.MkdirTemp("", "empty") if err != nil { @@ -371,7 +372,7 @@ func ChangesSize(newDir string, changes []Change) int64 { file := filepath.Join(newDir, change.Path) fileInfo, err := os.Lstat(file) if err != nil { - logrus.Errorf("Can not stat %q: %s", file, err) + log.G(context.TODO()).Errorf("Can not stat %q: %s", file, err) continue } @@ -420,22 +421,22 @@ func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) ChangeTime: timestamp, } if err := ta.TarWriter.WriteHeader(hdr); err != nil { - logrus.Debugf("Can't write whiteout header: %s", err) + log.G(context.TODO()).Debugf("Can't write whiteout header: %s", err) } } else { path := filepath.Join(dir, change.Path) if err := ta.addTarFile(path, change.Path[1:]); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", path, err) + log.G(context.TODO()).Debugf("Can't add file %s to tar: %s", path, err) } } } // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close layer: %s", err) + log.G(context.TODO()).Debugf("Can't close layer: %s", err) } if err := writer.Close(); err != nil { - logrus.Debugf("failed close Changes writer: %s", err) + log.G(context.TODO()).Debugf("failed close Changes writer: %s", err) } }() return reader, nil diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go index f8792b3d4e..81fcbc5bab 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go @@ -267,7 +267,7 @@ func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) continue } bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - var name = string(bytes[0:clen(bytes[:])]) + name := string(bytes[0:clen(bytes[:])]) if name == "." || name == ".." 
{ // Useless names continue } diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go index 833798bd11..13a7d3c0c6 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go index 54aace970e..853c73ee8c 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go index 0ea1596278..01eadc30d9 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -2,14 +2,15 @@ package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" + "context" "errors" "io" "os" "path/filepath" "strings" + "github.com/containerd/log" "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" ) // Errors used or returned by this file. @@ -107,7 +108,7 @@ func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, er sourceDir, sourceBase := SplitPathDirEntry(sourcePath) opts := TarResourceRebaseOpts(sourceBase, rebaseName) - logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + log.G(context.TODO()).Debugf("copying %q from %q", sourceBase, sourceDir) return TarWithOptions(sourceDir, opts) } diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go index 2ac7729f4c..065bd4adda 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index 1a2fb971f9..318f594212 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -2,6 +2,7 @@ package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" + "context" "fmt" "io" "os" @@ -9,9 +10,9 @@ import ( "runtime" "strings" + "github.com/containerd/log" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" ) // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be @@ -67,7 +68,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, // image but have it tagged as Windows inadvertently. 
if runtime.GOOS == "windows" { if strings.Contains(hdr.Name, ":") { - logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + log.G(context.TODO()).Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) continue } } @@ -92,7 +93,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, } defer os.RemoveAll(aufsTempdir) } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, options); err != nil { return 0, err } } @@ -183,7 +184,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, return 0, err } - if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil { + if err := createTarFile(path, dest, srcHdr, srcData, options); err != nil { return 0, err } diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go index d7f806445e..7216f2f4f9 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive diff --git a/vendor/github.com/docker/docker/pkg/archive/path_unix.go b/vendor/github.com/docker/docker/pkg/archive/path_unix.go index 0b135aea75..390264bf85 100644 --- a/vendor/github.com/docker/docker/pkg/archive/path_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/path_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go index d087796861..14c4ceb1d8 100644 --- a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go index 5745da9d1f..3b6d8a77aa 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go @@ -68,7 +68,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { - if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { + if err := idtools.MkdirAllAndChownNew(dest, 0o755, rootIDs); err != nil { return err } } diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_linux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_linux.go new file mode 100644 index 0000000000..f4d61ddf92 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_linux.go @@ -0,0 +1,53 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func doUnpack(decompressedArchive io.Reader, relDest, root string, options *archive.TarOptions) error { + done := make(chan error) + err := goInChroot(root, func() { done <- archive.Unpack(decompressedArchive, relDest, options) }) + if err != nil { + return err + } + return <-done +} + +func 
doPack(relSrc, root string, options *archive.TarOptions) (io.ReadCloser, error) { + tb, err := archive.NewTarballer(relSrc, options) + if err != nil { + return nil, errors.Wrap(err, "error processing tar file") + } + err = goInChroot(root, tb.Do) + if err != nil { + return nil, errors.Wrap(err, "could not chroot") + } + return tb.Reader(), nil +} + +func doUnpackLayer(root string, layer io.Reader, options *archive.TarOptions) (int64, error) { + type result struct { + layerSize int64 + err error + } + done := make(chan result) + + err := goInChroot(root, func() { + // We need to be able to set any perms + _ = unix.Umask(0) + + size, err := archive.UnpackLayer("/", layer, options) + done <- result{layerSize: size, err: err} + }) + if err != nil { + return 0, err + } + + res := <-done + + return res.layerSize, res.err +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go index 1e0e2382ec..c09baf4475 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" @@ -27,12 +26,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T return err } - done := make(chan error) - err = goInChroot(root, func() { done <- archive.Unpack(decompressedArchive, relDest, options) }) - if err != nil { - return err - } - return <-done + return doUnpack(decompressedArchive, relDest, root, options) } func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { @@ -46,15 +40,7 @@ func invokePack(srcPath string, options *archive.TarOptions, root string) (io.Re relSrc += "/" } - tb, err := archive.NewTarballer(relSrc, options) - if err != nil { - return nil, errors.Wrap(err, "error processing tar file") - } - err = goInChroot(root, tb.Do) - if err != nil { - return nil, errors.Wrap(err, "could not chroot") - } - return tb.Reader(), nil + return doPack(relSrc, root, options) } // resolvePathInChroot returns the equivalent to path inside a chroot rooted at root. 
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix_nolinux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix_nolinux.go new file mode 100644 index 0000000000..13e557b128 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix_nolinux.go @@ -0,0 +1,226 @@ +//go:build unix && !linux + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "syscall" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +const ( + packCmd = "chrootarchive-pack-in-chroot" + unpackCmd = "chrootarchive-unpack-in-chroot" + unpackLayerCmd = "chrootarchive-unpack-layer-in-chroot" +) + +func init() { + reexec.Register(packCmd, reexecMain(packInChroot)) + reexec.Register(unpackCmd, reexecMain(unpackInChroot)) + reexec.Register(unpackLayerCmd, reexecMain(unpackLayerInChroot)) +} + +func reexecMain(f func(options archive.TarOptions, args ...string) error) func() { + return func() { + if len(os.Args) < 2 { + fmt.Fprintln(os.Stderr, "root parameter is required") + os.Exit(1) + } + + options, err := recvOptions() + root := os.Args[1] + + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + if err := syscall.Chroot(root); err != nil { + fmt.Fprintln( + os.Stderr, + os.PathError{Op: "chroot", Path: root, Err: err}, + ) + os.Exit(2) + } + + if err := f(*options, os.Args[2:]...); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(3) + } + } +} + +func doUnpack(decompressedArchive io.Reader, relDest, root string, options *archive.TarOptions) error { + optionsR, optionsW, err := os.Pipe() + if err != nil { + return err + } + defer optionsW.Close() + defer optionsR.Close() + + stderr := bytes.NewBuffer(nil) + + cmd := reexec.Command(unpackCmd, root, relDest) + cmd.Stdin = decompressedArchive + cmd.Stderr = stderr + cmd.ExtraFiles = []*os.File{ + optionsR, + } + + if err = cmd.Start(); err != nil { + return errors.Wrap(err, "re-exec error") + } + + if err = json.NewEncoder(optionsW).Encode(options); err != nil { + return errors.Wrap(err, "tar options encoding failed") + } + + if err = cmd.Wait(); err != nil { + return errors.Wrap(err, stderr.String()) + } + + return nil +} + +func doPack(relSrc, root string, options *archive.TarOptions) (io.ReadCloser, error) { + optionsR, optionsW, err := os.Pipe() + if err != nil { + return nil, err + } + defer optionsW.Close() + defer optionsR.Close() + + stderr := bytes.NewBuffer(nil) + cmd := reexec.Command(packCmd, root, relSrc) + cmd.ExtraFiles = []*os.File{ + optionsR, + } + cmd.Stderr = stderr + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + + r, w := io.Pipe() + + if err = cmd.Start(); err != nil { + return nil, errors.Wrap(err, "re-exec error") + } + + go func() { + _, _ = io.Copy(w, stdout) + // Cleanup once stdout pipe is closed. 
+ if err = cmd.Wait(); err != nil { + r.CloseWithError(errors.Wrap(err, stderr.String())) + } else { + r.Close() + } + }() + + if err = json.NewEncoder(optionsW).Encode(options); err != nil { + return nil, errors.Wrap(err, "tar options encoding failed") + } + + return r, nil +} + +func doUnpackLayer(root string, layer io.Reader, options *archive.TarOptions) (int64, error) { + var result int64 + optionsR, optionsW, err := os.Pipe() + if err != nil { + return 0, err + } + defer optionsW.Close() + defer optionsR.Close() + buffer := bytes.NewBuffer(nil) + + cmd := reexec.Command(unpackLayerCmd, root) + cmd.Stdin = layer + cmd.Stdout = buffer + cmd.Stderr = buffer + cmd.ExtraFiles = []*os.File{ + optionsR, + } + + if err = cmd.Start(); err != nil { + return 0, errors.Wrap(err, "re-exec error") + } + + if err = json.NewEncoder(optionsW).Encode(options); err != nil { + return 0, errors.Wrap(err, "tar options encoding failed") + } + + if err = cmd.Wait(); err != nil { + return 0, errors.Wrap(err, buffer.String()) + } + + if err = json.NewDecoder(buffer).Decode(&result); err != nil { + return 0, errors.Wrap(err, "json decoding error") + } + + return result, nil +} + +func unpackInChroot(options archive.TarOptions, args ...string) error { + if len(args) < 1 { + return fmt.Errorf("destination parameter is required") + } + + relDest := args[0] + + return archive.Unpack(os.Stdin, relDest, &options) +} + +func packInChroot(options archive.TarOptions, args ...string) error { + if len(args) < 1 { + return fmt.Errorf("source parameter is required") + } + + relSrc := args[0] + + tb, err := archive.NewTarballer(relSrc, &options) + if err != nil { + return err + } + + go tb.Do() + + _, err = io.Copy(os.Stdout, tb.Reader()) + + return err +} + +func unpackLayerInChroot(options archive.TarOptions, _args ...string) error { + // We need to be able to set any perms + _ = unix.Umask(0) + + size, err := archive.UnpackLayer("/", os.Stdin, &options) + if err != nil { + return err + } + + return json.NewEncoder(os.Stdout).Encode(size) +} + +func recvOptions() (*archive.TarOptions, error) { + var options archive.TarOptions + optionsPipe := os.NewFile(3, "tar-options") + if optionsPipe == nil { + return nil, fmt.Errorf("could not read tar options from the pipe") + } + defer optionsPipe.Close() + err := json.NewDecoder(optionsPipe).Decode(&options) + if err != nil { + return &options, err + } + + return &options, nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go index 7095740a50..6ecf06c816 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go @@ -7,9 +7,7 @@ import ( "github.com/docker/docker/pkg/longpath" ) -func invokeUnpack(decompressedArchive io.ReadCloser, - dest string, - options *archive.TarOptions, root string) error { +func invokeUnpack(decompressedArchive io.ReadCloser, dest string, options *archive.TarOptions, root string) error { // Windows is different to Linux here because Windows does not support // chroot. Hence there is no point sandboxing a chrooted process to // do the unpack. We call inline instead within the daemon process. 
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go index 873390c57f..e3dd4cead0 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" @@ -9,7 +8,6 @@ import ( "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/archive" - "golang.org/x/sys/unix" ) // applyLayerHandler parses a diff in the standard layer format from `layer`, and @@ -35,23 +33,5 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } - - type result struct { - layerSize int64 - err error - } - - done := make(chan result) - err = goInChroot(dest, func() { - // We need to be able to set any perms - _ = unix.Umask(0) - - size, err := archive.UnpackLayer("/", layer, options) - done <- result{layerSize: size, err: err} - }) - if err != nil { - return 0, err - } - res := <-done - return res.layerSize, res.err + return doUnpackLayer(dest, layer, options) } diff --git a/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go b/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go index e65f783a23..f71bb036c7 100644 --- a/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go +++ b/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go @@ -6,8 +6,22 @@ import ( "github.com/moby/sys/symlink" ) +// CleanScopedPath prepares the given path to be combined with a mount path or +// a drive-letter. On Windows, it removes any existing driveletter (e.g. "C:"). +// The returned path is always prefixed with a [filepath.Separator]. +func CleanScopedPath(path string) string { + if len(path) >= 2 { + if v := filepath.VolumeName(path); len(v) > 0 { + path = path[len(v):] + } + } + return filepath.Join(string(filepath.Separator), path) +} + // ResolveScopedPath evaluates the given path scoped to the root. // For example, if root=/a, and path=/b/c, then this function would return /a/b/c. +// +// Deprecated: use [symlink.FollowSymlinkInScope]. func ResolveScopedPath(root, path string) (string, error) { return symlink.FollowSymlinkInScope(filepath.Join(root, path), root) } diff --git a/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go b/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go deleted file mode 100644 index 3e18599989..0000000000 --- a/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !windows -// +build !windows - -package containerfs // import "github.com/docker/docker/pkg/containerfs" - -import "path/filepath" - -// CleanScopedPath preappends a to combine with a mnt path. 
-func CleanScopedPath(path string) string { - return filepath.Join(string(filepath.Separator), path) -} diff --git a/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go b/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go deleted file mode 100644 index f4011d1203..0000000000 --- a/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go +++ /dev/null @@ -1,15 +0,0 @@ -package containerfs // import "github.com/docker/docker/pkg/containerfs" - -import "path/filepath" - -// CleanScopedPath removes the C:\ syntax, and prepares to combine -// with a volume path -func CleanScopedPath(path string) string { - if len(path) >= 2 { - c := path[0] - if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { - path = path[2:] - } - } - return filepath.Join(string(filepath.Separator), path) -} diff --git a/vendor/github.com/docker/docker/pkg/containerfs/rm.go b/vendor/github.com/docker/docker/pkg/containerfs/rm.go index 7abca3a804..303714a180 100644 --- a/vendor/github.com/docker/docker/pkg/containerfs/rm.go +++ b/vendor/github.com/docker/docker/pkg/containerfs/rm.go @@ -1,5 +1,4 @@ //go:build !darwin && !windows -// +build !darwin,!windows package containerfs // import "github.com/docker/docker/pkg/containerfs" diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go new file mode 100644 index 0000000000..590683206c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir.go @@ -0,0 +1,44 @@ +package homedir + +import ( + "os" + "os/user" + "runtime" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on. +// +// Deprecated: this function is no longer used, and will be removed in the next release. +func Key() string { + return envKeyName +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +// +// On non-Windows platforms, it falls back to nss lookups, if the home +// directory cannot be obtained from environment-variables. +// +// If linking statically with cgo enabled against glibc, ensure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. +func Get() string { + home, _ := os.UserHomeDir() + if home == "" && runtime.GOOS != "windows" { + if u, err := user.Current(); err == nil { + return u.HomeDir + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +// +// Deprecated: this function is no longer used, and will be removed in the next release. 
+func GetShortcutString() string { + return homeShortCut +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go index 11f1bec985..4eeb26b5dc 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package homedir // import "github.com/docker/docker/pkg/homedir" diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go index d1732dee52..feae4d736c 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -1,39 +1,8 @@ //go:build !windows -// +build !windows package homedir // import "github.com/docker/docker/pkg/homedir" -import ( - "os" - "os/user" +const ( + envKeyName = "HOME" + homeShortCut = "~" ) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -// -// If linking statically with cgo enabled against glibc, ensure the -// osusergo build tag is used. -// -// If needing to do nss lookups, do not disable cgo or set osusergo. -func Get() string { - home := os.Getenv(Key()) - if home == "" { - if u, err := user.Current(); err == nil { - return u.HomeDir - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "~" -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go index 2f81813b28..37f4ee6701 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -1,24 +1,6 @@ package homedir // import "github.com/docker/docker/pkg/homedir" -import ( - "os" +const ( + envKeyName = "USERPROFILE" + homeShortCut = "%USERPROFILE%" // be careful while using in format functions ) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "USERPROFILE" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - return os.Getenv(Key()) -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. 
-func GetShortcutString() string { - return "%USERPROFILE%" // be careful while using in format functions -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index a4001c3b87..cd621bdcc2 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" @@ -11,15 +10,9 @@ import ( "os/exec" "path/filepath" "strconv" - "sync" "syscall" - "github.com/opencontainers/runc/libcontainer/user" -) - -var ( - entOnce sync.Once - getentCmd string + "github.com/moby/sys/user" ) func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { @@ -162,10 +155,10 @@ func getentGroup(name string) (user.Group, error) { } func callGetent(database, key string) (io.Reader, error) { - entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) - // if no `getent` command on host, can't do anything else - if getentCmd == "" { - return nil, fmt.Errorf("unable to find getent command") + getentCmd, err := resolveBinary("getent") + // if no `getent` command within the execution environment, can't do anything else + if err != nil { + return nil, fmt.Errorf("unable to find getent command: %w", err) } command := exec.Command(getentCmd, database, key) // we run getent within container filesystem, but without /dev so /dev/null is not available for exec to mock stdin diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go index 5e24577e2c..6a9311c4a7 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package idtools // import "github.com/docker/docker/pkg/idtools" diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go index 05cc696365..517a2f52ca 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go index de00b95e3f..e03d3fee75 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -3,11 +3,15 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "context" "io" + "runtime/debug" + "sync/atomic" // make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered // TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged. 
_ "crypto/sha256" _ "crypto/sha512" + + "github.com/containerd/log" ) // ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser @@ -16,10 +20,15 @@ import ( type ReadCloserWrapper struct { io.Reader closer func() error + closed atomic.Bool } // Close calls back the passed closer function func (r *ReadCloserWrapper) Close() error { + if !r.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("ReadCloserWrapper") + return nil + } return r.closer() } @@ -87,6 +96,7 @@ type cancelReadCloser struct { cancel func() pR *io.PipeReader // Stream to read from pW *io.PipeWriter + closed atomic.Bool } // NewCancelReadCloser creates a wrapper that closes the ReadCloser when the @@ -146,6 +156,17 @@ func (p *cancelReadCloser) closeWithError(err error) { // Close closes the wrapper its underlying reader. It will cause // future calls to Read to return io.EOF. func (p *cancelReadCloser) Close() error { + if !p.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("cancelReadCloser") + return nil + } p.closeWithError(io.EOF) return nil } + +func subsequentCloseWarn(name string) { + log.G(context.TODO()).Error("subsequent attempt to close " + name) + if log.GetLevel() >= log.DebugLevel { + log.G(context.TODO()).Errorf("stack trace: %s", string(debug.Stack())) + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go b/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go deleted file mode 100644 index b3321602c2..0000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go +++ /dev/null @@ -1,10 +0,0 @@ -package ioutils - -import "github.com/docker/docker/pkg/longpath" - -// TempDir is the equivalent of [os.MkdirTemp], except that on Windows -// the result is in Windows longpath format. On Unix systems it is -// equivalent to [os.MkdirTemp]. -// -// Deprecated: use [longpath.MkdirTemp]. -var TempDir = longpath.MkdirTemp diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go index 61c679497d..1f50602f28 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -1,6 +1,9 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" -import "io" +import ( + "io" + "sync/atomic" +) // NopWriter represents a type which write operation is nop. type NopWriter struct{} @@ -29,9 +32,14 @@ func (f *NopFlusher) Flush() {} type writeCloserWrapper struct { io.Writer closer func() error + closed atomic.Bool } func (r *writeCloserWrapper) Close() error { + if !r.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("WriteCloserWrapper") + return nil + } return r.closer() } diff --git a/vendor/github.com/docker/docker/pkg/meminfo/meminfo.go b/vendor/github.com/docker/docker/pkg/meminfo/meminfo.go deleted file mode 100644 index 4f33ad26bf..0000000000 --- a/vendor/github.com/docker/docker/pkg/meminfo/meminfo.go +++ /dev/null @@ -1,26 +0,0 @@ -// Package meminfo provides utilites to retrieve memory statistics of -// the host system. -package meminfo - -// Read retrieves memory statistics of the host system and returns a -// Memory type. It is only supported on Linux and Windows, and returns an -// error on other platforms. -func Read() (*Memory, error) { - return readMemInfo() -} - -// Memory contains memory statistics of the host system. -type Memory struct { - // Total usable RAM (i.e. physical RAM minus a few reserved bits and the - // kernel binary code). 
- MemTotal int64 - - // Amount of free memory. - MemFree int64 - - // Total amount of swap space available. - SwapTotal int64 - - // Amount of swap space that is currently unused. - SwapFree int64 -} diff --git a/vendor/github.com/docker/docker/pkg/meminfo/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/meminfo/meminfo_linux.go deleted file mode 100644 index 0c1cd21d49..0000000000 --- a/vendor/github.com/docker/docker/pkg/meminfo/meminfo_linux.go +++ /dev/null @@ -1,69 +0,0 @@ -package meminfo - -import ( - "bufio" - "io" - "os" - "strconv" - "strings" -) - -// readMemInfo retrieves memory statistics of the host system and returns a -// Memory type. -func readMemInfo() (*Memory, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return nil, err - } - defer file.Close() - return parseMemInfo(file) -} - -// parseMemInfo parses the /proc/meminfo file into -// a Memory object given an io.Reader to the file. -// Throws error if there are problems reading from the file -func parseMemInfo(reader io.Reader) (*Memory, error) { - meminfo := &Memory{} - scanner := bufio.NewScanner(reader) - memAvailable := int64(-1) - for scanner.Scan() { - // Expected format: ["MemTotal:", "1234", "kB"] - parts := strings.Fields(scanner.Text()) - - // Sanity checks: Skip malformed entries. - if len(parts) < 3 || parts[2] != "kB" { - continue - } - - // Convert to bytes. - size, err := strconv.Atoi(parts[1]) - if err != nil { - continue - } - // Convert to KiB - bytes := int64(size) * 1024 - - switch parts[0] { - case "MemTotal:": - meminfo.MemTotal = bytes - case "MemFree:": - meminfo.MemFree = bytes - case "MemAvailable:": - memAvailable = bytes - case "SwapTotal:": - meminfo.SwapTotal = bytes - case "SwapFree:": - meminfo.SwapFree = bytes - } - } - if memAvailable != -1 { - meminfo.MemFree = memAvailable - } - - // Handle errors that may have occurred during the reading of the file. - if err := scanner.Err(); err != nil { - return nil, err - } - - return meminfo, nil -} diff --git a/vendor/github.com/docker/docker/pkg/meminfo/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/meminfo/meminfo_unsupported.go deleted file mode 100644 index ebfadd5343..0000000000 --- a/vendor/github.com/docker/docker/pkg/meminfo/meminfo_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !linux && !windows -// +build !linux,!windows - -package meminfo - -import "errors" - -// readMemInfo is not supported on platforms other than linux and windows. 
-func readMemInfo() (*Memory, error) { - return nil, errors.New("platform and architecture is not supported") -} diff --git a/vendor/github.com/docker/docker/pkg/meminfo/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/meminfo/meminfo_windows.go deleted file mode 100644 index aa7d9375be..0000000000 --- a/vendor/github.com/docker/docker/pkg/meminfo/meminfo_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -package meminfo - -import ( - "unsafe" - - "golang.org/x/sys/windows" -) - -var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// readMemInfo retrieves memory statistics of the host system and returns a -// Memory type. -func readMemInfo() (*Memory, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &Memory{}, nil - } - return &Memory{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go index 3245b74166..505f81bb20 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows // Package kernel provides helper function to get, parse and compare kernel // versions for different platforms. diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go index 73f233e782..86c92d5395 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin // Package kernel provides helper function to get, parse and compare kernel // versions for different platforms. diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go index 52c20f32aa..9b49e5c55e 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go @@ -1,10 +1,11 @@ //go:build linux || freebsd || openbsd -// +build linux freebsd openbsd package kernel // import "github.com/docker/docker/pkg/parsers/kernel" import ( - "github.com/sirupsen/logrus" + "context" + + "github.com/containerd/log" "golang.org/x/sys/unix" ) @@ -23,7 +24,7 @@ func GetKernelVersion() (*VersionInfo, error) { // the given version. 
func CheckKernelVersion(k, major, minor int) bool { if v, err := GetKernelVersion(); err != nil { - logrus.Warnf("error getting kernel version: %s", err) + log.G(context.TODO()).Warnf("error getting kernel version: %s", err) } else { if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { return false diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go index a04763872a..f514521473 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go @@ -21,7 +21,6 @@ func (k *VersionInfo) String() string { // GetKernelVersion gets the current kernel version. func GetKernelVersion() (*VersionInfo, error) { - KVI := &VersionInfo{"Unknown", 0, 0, 0} k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go index 156a5ea8ed..ce38c7bde1 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package kernel // import "github.com/docker/docker/pkg/parsers/kernel" diff --git a/vendor/github.com/docker/docker/pkg/plugins/client.go b/vendor/github.com/docker/docker/pkg/plugins/client.go index 68066501b0..f7756f2097 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/client.go +++ b/vendor/github.com/docker/docker/pkg/plugins/client.go @@ -9,11 +9,11 @@ import ( "net/url" "time" + "github.com/containerd/log" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/plugins/transport" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" ) const ( @@ -26,7 +26,10 @@ const ( dummyHost = "plugin.moby.localhost" ) -func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) { +// VersionMimetype is the Content-Type the engine sends to plugins. +const VersionMimetype = transport.VersionMimetype + +func newTransport(addr string, tlsConfig *tlsconfig.Options) (*transport.HTTPTransport, error) { tr := &http.Transport{} if tlsConfig != nil { @@ -77,7 +80,7 @@ func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeout tim } // newClientWithTransport creates a new plugin client with a given transport. -func newClientWithTransport(tr transport.Transport, timeout time.Duration) *Client { +func newClientWithTransport(tr *transport.HTTPTransport, timeout time.Duration) *Client { return &Client{ http: &http.Client{ Transport: tr, @@ -87,15 +90,24 @@ func newClientWithTransport(tr transport.Transport, timeout time.Duration) *Clie } } +// requestFactory defines an interface that transports can implement to +// create new requests. It's used in testing. +type requestFactory interface { + NewRequest(path string, data io.Reader) (*http.Request, error) +} + // Client represents a plugin client. 
type Client struct { http *http.Client // http client to use - requestFactory transport.RequestFactory + requestFactory requestFactory } // RequestOpts is the set of options that can be passed into a request type RequestOpts struct { Timeout time.Duration + + // testTimeOut is used during tests to limit the max timeout in [abort] + testTimeOut int } // WithRequestTimeout sets a timeout duration for plugin requests @@ -126,7 +138,7 @@ func (c *Client) CallWithOptions(serviceMethod string, args interface{}, ret int defer body.Close() if ret != nil { if err := json.NewDecoder(body).Decode(&ret); err != nil { - logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + log.G(context.TODO()).Errorf("%s: error reading plugin resp: %v", serviceMethod, err) return err } } @@ -150,7 +162,7 @@ func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) } defer body.Close() if err := json.NewDecoder(body).Decode(&ret); err != nil { - logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + log.G(context.TODO()).Errorf("%s: error reading plugin resp: %v", serviceMethod, err) return err } return nil @@ -186,11 +198,11 @@ func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool, } timeOff := backoff(retries) - if abort(start, timeOff) { + if abort(start, timeOff, opts.testTimeOut) { return nil, err } retries++ - logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) + log.G(context.TODO()).Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) time.Sleep(timeOff) continue } @@ -238,8 +250,15 @@ func backoff(retries int) time.Duration { return time.Duration(b) * time.Second } -func abort(start time.Time, timeOff time.Duration) bool { - return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second +// testNonExistingPlugin is a special plugin-name, which overrides defaultTimeOut in tests. +const testNonExistingPlugin = "this-plugin-does-not-exist" + +func abort(start time.Time, timeOff time.Duration, overrideTimeout int) bool { + to := defaultTimeOut + if overrideTimeout > 0 { + to = overrideTimeout + } + return timeOff+time.Since(start) >= time.Duration(to)*time.Second } func httpScheme(u *url.URL) string { diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/vendor/github.com/docker/docker/pkg/plugins/discovery.go index 04c3779df9..37316ed482 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery.go +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery.go @@ -13,34 +13,35 @@ import ( "github.com/pkg/errors" ) -var ( - // ErrNotFound plugin not found - ErrNotFound = errors.New("plugin not found") - socketsPath = "/run/docker/plugins" -) +// ErrNotFound plugin not found +var ErrNotFound = errors.New("plugin not found") + +const defaultSocketsPath = "/run/docker/plugins" // LocalRegistry defines a registry that is local (using unix socket). 
type LocalRegistry struct { - SpecsPaths func() []string + socketsPath string + specsPaths []string } func NewLocalRegistry() LocalRegistry { return LocalRegistry{ - SpecsPaths, + socketsPath: defaultSocketsPath, + specsPaths: specsPaths(), } } // Scan scans all the plugin paths and returns all the names it found func (l *LocalRegistry) Scan() ([]string, error) { var names []string - dirEntries, err := os.ReadDir(socketsPath) + dirEntries, err := os.ReadDir(l.socketsPath) if err != nil && !os.IsNotExist(err) { return nil, errors.Wrap(err, "error reading dir entries") } for _, entry := range dirEntries { if entry.IsDir() { - fi, err := os.Stat(filepath.Join(socketsPath, entry.Name(), entry.Name()+".sock")) + fi, err := os.Stat(filepath.Join(l.socketsPath, entry.Name(), entry.Name()+".sock")) if err != nil { continue } @@ -53,31 +54,30 @@ func (l *LocalRegistry) Scan() ([]string, error) { } } - for _, p := range l.SpecsPaths() { - dirEntries, err := os.ReadDir(p) + for _, p := range l.specsPaths { + dirEntries, err = os.ReadDir(p) if err != nil && !os.IsNotExist(err) { return nil, errors.Wrap(err, "error reading dir entries") } - for _, fi := range dirEntries { - if fi.IsDir() { - infos, err := os.ReadDir(filepath.Join(p, fi.Name())) + for _, entry := range dirEntries { + if entry.IsDir() { + infos, err := os.ReadDir(filepath.Join(p, entry.Name())) if err != nil { continue } for _, info := range infos { - if strings.TrimSuffix(info.Name(), filepath.Ext(info.Name())) == fi.Name() { - fi = info + if strings.TrimSuffix(info.Name(), filepath.Ext(info.Name())) == entry.Name() { + entry = info break } } } - ext := filepath.Ext(fi.Name()) - switch ext { + switch ext := filepath.Ext(entry.Name()); ext { case ".spec", ".json": - plugin := strings.TrimSuffix(fi.Name(), ext) + plugin := strings.TrimSuffix(entry.Name(), ext) names = append(names, plugin) default: } @@ -88,21 +88,20 @@ func (l *LocalRegistry) Scan() ([]string, error) { // Plugin returns the plugin registered with the given name (or returns an error). func (l *LocalRegistry) Plugin(name string) (*Plugin, error) { - socketpaths := pluginPaths(socketsPath, name, ".sock") - - for _, p := range socketpaths { + socketPaths := pluginPaths(l.socketsPath, name, ".sock") + for _, p := range socketPaths { if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { return NewLocalPlugin(name, "unix://"+p), nil } } - var txtspecpaths []string - for _, p := range l.SpecsPaths() { - txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) - txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) + var txtSpecPaths []string + for _, p := range l.specsPaths { + txtSpecPaths = append(txtSpecPaths, pluginPaths(p, name, ".spec")...) + txtSpecPaths = append(txtSpecPaths, pluginPaths(p, name, ".json")...) } - for _, p := range txtspecpaths { + for _, p := range txtSpecPaths { if _, err := os.Stat(p); err == nil { if strings.HasSuffix(p, ".json") { return readPluginJSONInfo(name, p) @@ -113,6 +112,25 @@ func (l *LocalRegistry) Plugin(name string) (*Plugin, error) { return nil, errors.Wrapf(ErrNotFound, "could not find plugin %s in v1 plugin registry", name) } +// SpecsPaths returns paths in which to look for plugins, in order of priority. 
+// +// On Windows: +// +// - "%programdata%\docker\plugins" +// +// On Unix in non-rootless mode: +// +// - "/etc/docker/plugins" +// - "/usr/lib/docker/plugins" +// +// On Unix in rootless-mode: +// +// - "$XDG_CONFIG_HOME/docker/plugins" (or "/etc/docker/plugins" if $XDG_CONFIG_HOME is not set) +// - "$HOME/.local/lib/docker/plugins" (or "/usr/lib/docker/plugins" if $HOME is not set) +func SpecsPaths() []string { + return specsPaths() +} + func readPluginInfo(name, path string) (*Plugin, error) { content, err := os.ReadFile(path) if err != nil { diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go index 2c1b2a3193..1a05307b74 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package plugins // import "github.com/docker/docker/pkg/plugins" import ( @@ -10,32 +9,23 @@ import ( ) func rootlessConfigPluginsPath() string { - configHome, err := homedir.GetConfigHome() - if err == nil { + if configHome, err := homedir.GetConfigHome(); err == nil { return filepath.Join(configHome, "docker/plugins") } - return "/etc/docker/plugins" } func rootlessLibPluginsPath() string { - libHome, err := homedir.GetLibHome() - if err == nil { + if libHome, err := homedir.GetLibHome(); err == nil { return filepath.Join(libHome, "docker/plugins") } - return "/usr/lib/docker/plugins" } -// SpecsPaths returns -// { "%programdata%\docker\plugins" } on Windows, -// { "/etc/docker/plugins", "/usr/lib/docker/plugins" } on Unix in non-rootless mode, -// { "$XDG_CONFIG_HOME/docker/plugins", "$HOME/.local/lib/docker/plugins" } on Unix in rootless mode -// with fallback to the corresponding path in non-rootless mode if $XDG_CONFIG_HOME or $HOME is not set. -func SpecsPaths() []string { +// specsPaths is the non-Windows implementation of [SpecsPaths]. +func specsPaths() []string { if rootless.RunningWithRootlessKit() { return []string{rootlessConfigPluginsPath(), rootlessLibPluginsPath()} } - return []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} } diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go index ea5d4be375..fe825792ba 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go @@ -5,11 +5,7 @@ import ( "path/filepath" ) -// SpecsPaths returns -// { "%programdata%\docker\plugins" } on Windows, -// { "/etc/docker/plugins", "/usr/lib/docker/plugins" } on Unix in non-rootless mode, -// { "$XDG_CONFIG_HOME/docker/plugins", "$HOME/.local/lib/docker/plugins" } on Unix in rootless mode -// with fallback to the corresponding path in non-rootless mode if $XDG_CONFIG_HOME or $HOME is not set. -func SpecsPaths() []string { +// specsPaths is the Windows implementation of [SpecsPaths].
+func specsPaths() []string { return []string{filepath.Join(os.Getenv("programdata"), "docker", "plugins")} } diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go index 7d9c5eac22..2efd8508bf 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins.go +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins.go @@ -23,22 +23,21 @@ package plugins // import "github.com/docker/docker/pkg/plugins" import ( + "context" "errors" "fmt" "sync" "time" + "github.com/containerd/log" "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" ) // ProtocolSchemeHTTPV1 is the name of the protocol used for interacting with plugins using this package. const ProtocolSchemeHTTPV1 = "moby.plugins.http/v1" -var ( - // ErrNotImplements is returned if the plugin does not implement the requested driver. - ErrNotImplements = errors.New("Plugin does not implement the requested driver") -) +// ErrNotImplements is returned if the plugin does not implement the requested driver. +var ErrNotImplements = errors.New("Plugin does not implement the requested driver") type plugins struct { sync.Mutex @@ -102,6 +101,12 @@ func (p *Plugin) IsV1() bool { return true } +// ScopedPath returns the path scoped to the plugin's rootfs. +// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. +func (p *Plugin) ScopedPath(s string) string { + return s +} + // NewLocalPlugin creates a new local plugin. func NewLocalPlugin(name, addr string) *Plugin { return &Plugin{ @@ -196,14 +201,14 @@ func (p *Plugin) implements(kind string) bool { return false } -func load(name string) (*Plugin, error) { - return loadWithRetry(name, true) -} - func loadWithRetry(name string, retry bool) (*Plugin, error) { registry := NewLocalRegistry() start := time.Now() - + var testTimeOut int + if name == testNonExistingPlugin { + // override the timeout in tests + testTimeOut = 2 + } var retries int for { pl, err := registry.Plugin(name) @@ -213,11 +218,11 @@ func loadWithRetry(name string, retry bool) (*Plugin, error) { } timeOff := backoff(retries) - if abort(start, timeOff) { + if abort(start, timeOff, testTimeOut) { return nil, err } retries++ - logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) + log.G(context.TODO()).Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) time.Sleep(timeOff) continue } @@ -249,7 +254,7 @@ func get(name string) (*Plugin, error) { if ok { return pl, pl.activate() } - return load(name) + return loadWithRetry(name, true) } // Get returns the plugin given the specified name and requested implementation. 
@@ -262,7 +267,7 @@ func Get(name, imp string) (*Plugin, error) { return nil, err } if err := pl.waitActive(); err == nil && pl.implements(imp) { - logrus.Debugf("%s implements: %s", name, imp) + log.G(context.TODO()).Debugf("%s implements: %s", name, imp) return pl, nil } return nil, fmt.Errorf("%w: plugin=%q, requested implementation=%q", ErrNotImplements, name, imp) @@ -329,7 +334,7 @@ func (l *LocalRegistry) GetAll(imp string) ([]*Plugin, error) { var out []*Plugin for pl := range chPl { if pl.err != nil { - logrus.Error(pl.err) + log.G(context.TODO()).Error(pl.err) continue } if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) { diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go deleted file mode 100644 index 23e9d5715a..0000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !windows -// +build !windows - -package plugins // import "github.com/docker/docker/pkg/plugins" - -// ScopedPath returns the path scoped to the plugin's rootfs. -// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. -func (p *Plugin) ScopedPath(s string) string { - return s -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go deleted file mode 100644 index ddf1d786c6..0000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package plugins // import "github.com/docker/docker/pkg/plugins" - -// ScopedPath returns the path scoped to the plugin's rootfs. -// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. -func (p *Plugin) ScopedPath(s string) string { - return s -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go index 76d3bdb712..e4c1979b8b 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go @@ -3,20 +3,21 @@ package transport // import "github.com/docker/docker/pkg/plugins/transport" import ( "io" "net/http" + "strings" ) -// httpTransport holds an http.RoundTripper +// HTTPTransport holds an [http.RoundTripper] // and information about the scheme and address the transport // sends request to. -type httpTransport struct { +type HTTPTransport struct { http.RoundTripper scheme string addr string } -// NewHTTPTransport creates a new httpTransport. -func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { - return httpTransport{ +// NewHTTPTransport creates a new HTTPTransport. +func NewHTTPTransport(r http.RoundTripper, scheme, addr string) *HTTPTransport { + return &HTTPTransport{ RoundTripper: r, scheme: scheme, addr: addr, @@ -25,11 +26,15 @@ func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { // NewRequest creates a new http.Request and sets the URL // scheme and address with the transport's fields. 
-func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { - req, err := newHTTPRequest(path, data) +func (t HTTPTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + req, err := http.NewRequest(http.MethodPost, path, data) if err != nil { return nil, err } + req.Header.Add("Accept", VersionMimetype) req.URL.Scheme = t.scheme req.URL.Host = t.addr return req, nil diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/mimetype.go b/vendor/github.com/docker/docker/pkg/plugins/transport/mimetype.go new file mode 100644 index 0000000000..b5336d515c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/mimetype.go @@ -0,0 +1,6 @@ +package transport // import "github.com/docker/docker/pkg/plugins/transport" + +// VersionMimetype is the Content-Type the engine sends to plugins. +// +// For convenience, there is an alias in [github.com/docker/docker/pkg/plugins.VersionMimetype]. +const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go deleted file mode 100644 index 6c66cad662..0000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go +++ /dev/null @@ -1,36 +0,0 @@ -package transport // import "github.com/docker/docker/pkg/plugins/transport" - -import ( - "io" - "net/http" - "strings" -) - -// VersionMimetype is the Content-Type the engine sends to plugins. -const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" - -// RequestFactory defines an interface that -// transports can implement to create new requests. -type RequestFactory interface { - NewRequest(path string, data io.Reader) (*http.Request, error) -} - -// Transport defines an interface that plugin transports -// must implement. -type Transport interface { - http.RoundTripper - RequestFactory -} - -// newHTTPRequest creates a new request with a path and a body. -func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - req, err := http.NewRequest(http.MethodPost, path, data) - if err != nil { - return nil, err - } - req.Header.Add("Accept", VersionMimetype) - return req, nil -} diff --git a/vendor/github.com/docker/docker/pkg/process/doc.go b/vendor/github.com/docker/docker/pkg/process/doc.go deleted file mode 100644 index dae536d7db..0000000000 --- a/vendor/github.com/docker/docker/pkg/process/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package process provides a set of basic functions to manage individual -// processes. -package process diff --git a/vendor/github.com/docker/docker/pkg/process/process_unix.go b/vendor/github.com/docker/docker/pkg/process/process_unix.go deleted file mode 100644 index daf3923626..0000000000 --- a/vendor/github.com/docker/docker/pkg/process/process_unix.go +++ /dev/null @@ -1,82 +0,0 @@ -//go:build !windows -// +build !windows - -package process - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "runtime" - "strconv" - - "golang.org/x/sys/unix" -) - -// Alive returns true if process with a given pid is running. It only considers -// positive PIDs; 0 (all processes in the current process group), -1 (all processes -// with a PID larger than 1), and negative (-n, all processes in process group -// "n") values for pid are never considered to be alive. 
-func Alive(pid int) bool { - if pid < 1 { - return false - } - switch runtime.GOOS { - case "darwin": - // OS X does not have a proc filesystem. Use kill -0 pid to judge if the - // process exists. From KILL(2): https://www.freebsd.org/cgi/man.cgi?query=kill&sektion=2&manpath=OpenDarwin+7.2.1 - // - // Sig may be one of the signals specified in sigaction(2) or it may - // be 0, in which case error checking is performed but no signal is - // actually sent. This can be used to check the validity of pid. - err := unix.Kill(pid, 0) - - // Either the PID was found (no error) or we get an EPERM, which means - // the PID exists, but we don't have permissions to signal it. - return err == nil || err == unix.EPERM - default: - _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))) - return err == nil - } -} - -// Kill force-stops a process. It only considers positive PIDs; 0 (all processes -// in the current process group), -1 (all processes with a PID larger than 1), -// and negative (-n, all processes in process group "n") values for pid are -// ignored. Refer to [KILL(2)] for details. -// -// [KILL(2)]: https://man7.org/linux/man-pages/man2/kill.2.html -func Kill(pid int) error { - if pid < 1 { - return fmt.Errorf("invalid PID (%d): only positive PIDs are allowed", pid) - } - err := unix.Kill(pid, unix.SIGKILL) - if err != nil && err != unix.ESRCH { - return err - } - return nil -} - -// Zombie return true if process has a state with "Z". It only considers positive -// PIDs; 0 (all processes in the current process group), -1 (all processes with -// a PID larger than 1), and negative (-n, all processes in process group "n") -// values for pid are ignored. Refer to [PROC(5)] for details. -// -// [PROC(5)]: https://man7.org/linux/man-pages/man5/proc.5.html -func Zombie(pid int) (bool, error) { - if pid < 1 { - return false, nil - } - data, err := os.ReadFile(fmt.Sprintf("/proc/%d/stat", pid)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - if cols := bytes.SplitN(data, []byte(" "), 4); len(cols) >= 3 && string(cols[2]) == "Z" { - return true, nil - } - return false, nil -} diff --git a/vendor/github.com/docker/docker/pkg/process/process_windows.go b/vendor/github.com/docker/docker/pkg/process/process_windows.go deleted file mode 100644 index 26158d09ec..0000000000 --- a/vendor/github.com/docker/docker/pkg/process/process_windows.go +++ /dev/null @@ -1,52 +0,0 @@ -package process - -import ( - "os" - - "golang.org/x/sys/windows" -) - -// Alive returns true if process with a given pid is running. -func Alive(pid int) bool { - h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, uint32(pid)) - if err != nil { - return false - } - var c uint32 - err = windows.GetExitCodeProcess(h, &c) - _ = windows.CloseHandle(h) - if err != nil { - // From the GetExitCodeProcess function (processthreadsapi.h) API docs: - // https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-getexitcodeprocess - // - // The GetExitCodeProcess function returns a valid error code defined by the - // application only after the thread terminates. Therefore, an application should - // not use STILL_ACTIVE (259) as an error code (STILL_ACTIVE is a macro for - // STATUS_PENDING (minwinbase.h)). 
If a thread returns STILL_ACTIVE (259) as - // an error code, then applications that test for that value could interpret it - // to mean that the thread is still running, and continue to test for the - // completion of the thread after the thread has terminated, which could put - // the application into an infinite loop. - return c == uint32(windows.STATUS_PENDING) - } - return true -} - -// Kill force-stops a process. -func Kill(pid int) error { - p, err := os.FindProcess(pid) - if err == nil { - err = p.Kill() - if err != nil && err != os.ErrProcessDone { - return err - } - } - return nil -} - -// Zombie is not supported on Windows. -// -// TODO(thaJeztah): remove once we remove the stubs from pkg/system. -func Zombie(_ int) (bool, error) { - return false, nil -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go index b90043052e..0df5195e70 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go @@ -1,5 +1,4 @@ //go:build freebsd || darwin -// +build freebsd darwin package reexec // import "github.com/docker/docker/pkg/reexec" diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go index 7175853a55..bec9761701 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !windows && !freebsd && !darwin -// +build !linux,!windows,!freebsd,!darwin package reexec // import "github.com/docker/docker/pkg/reexec" diff --git a/vendor/github.com/docker/docker/pkg/rootless/rootless.go b/vendor/github.com/docker/docker/pkg/rootless/rootless.go index f407f0fc42..b52f8eee71 100644 --- a/vendor/github.com/docker/docker/pkg/rootless/rootless.go +++ b/vendor/github.com/docker/docker/pkg/rootless/rootless.go @@ -1,12 +1,6 @@ package rootless // import "github.com/docker/docker/pkg/rootless" -import ( - "os" - "path/filepath" - - "github.com/pkg/errors" - "github.com/rootless-containers/rootlesskit/pkg/api/client" -) +import "os" // RootlessKitDockerProxyBinary is the binary name of rootlesskit-docker-proxy const RootlessKitDockerProxyBinary = "rootlesskit-docker-proxy" @@ -15,13 +9,3 @@ const RootlessKitDockerProxyBinary = "rootlesskit-docker-proxy" func RunningWithRootlessKit() bool { return os.Getenv("ROOTLESSKIT_STATE_DIR") != "" } - -// GetRootlessKitClient returns RootlessKit client -func GetRootlessKitClient() (client.Client, error) { - stateDir := os.Getenv("ROOTLESSKIT_STATE_DIR") - if stateDir == "" { - return nil, errors.New("environment variable `ROOTLESSKIT_STATE_DIR` is not set") - } - apiSock := filepath.Join(stateDir, "api.sock") - return client.New(apiSock) -} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/cgroup2_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/cgroup2_linux.go index 151179ca32..2290c2ce8b 100644 --- a/vendor/github.com/docker/docker/pkg/sysinfo/cgroup2_linux.go +++ b/vendor/github.com/docker/docker/pkg/sysinfo/cgroup2_linux.go @@ -1,6 +1,7 @@ package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( + "context" "os" "path" "strings" @@ -8,7 +9,7 @@ import ( "github.com/containerd/cgroups/v3" cgroupsV2 "github.com/containerd/cgroups/v3/cgroup2" "github.com/containerd/containerd/pkg/userns" - "github.com/sirupsen/logrus" + "github.com/containerd/log" ) func 
newV2(options ...Opt) *SysInfo { @@ -29,12 +30,12 @@ func newV2(options ...Opt) *SysInfo { m, err := cgroupsV2.Load(sysInfo.cg2GroupPath) if err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } else { sysInfo.cg2Controllers = make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } for _, c := range controllers { sysInfo.cg2Controllers[c] = struct{}{} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_other.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_other.go index fb31cae649..fcafd56ae3 100644 --- a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_other.go +++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_other.go @@ -1,5 +1,4 @@ //go:build !linux && !windows -// +build !linux,!windows package sysinfo diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go index 215d7ff302..59bf0d278a 100644 --- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go @@ -1,6 +1,7 @@ package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( + "context" "fmt" "os" "path" @@ -10,8 +11,8 @@ import ( "github.com/containerd/cgroups/v3" "github.com/containerd/cgroups/v3/cgroup1" "github.com/containerd/containerd/pkg/seccomp" + "github.com/containerd/log" "github.com/moby/sys/mountinfo" - "github.com/sirupsen/logrus" ) var ( @@ -107,7 +108,7 @@ func newV1() *SysInfo { sysInfo.cgMounts, err = findCgroupV1Mountpoints() if err != nil { - logrus.Warn(err) + log.G(context.TODO()).Warn(err) } else { ops = append(ops, applyMemoryCgroupInfo, diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_other.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_other.go index aa97c0f29a..37742db781 100644 --- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_other.go +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package sysinfo // import "github.com/docker/docker/pkg/sysinfo" diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go index 84ae157051..92ff02097d 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go index 2573d71622..f4bbcce744 100644 --- a/vendor/github.com/docker/docker/pkg/system/errors.go +++ b/vendor/github.com/docker/docker/pkg/system/errors.go @@ -1,13 +1,6 @@ package system // import "github.com/docker/docker/pkg/system" -import ( - "errors" -) +import "errors" -var ( - // ErrNotSupportedPlatform means the platform is not supported. - ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") - - // ErrNotSupportedOperatingSystem means the operating system is not supported. - ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") -) +// ErrNotSupportedPlatform means the platform is not supported. 
+var ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go index 3801129404..f01f9385e1 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/image_os.go b/vendor/github.com/docker/docker/pkg/system/image_os.go deleted file mode 100644 index e3de86be29..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/image_os.go +++ /dev/null @@ -1,10 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" -import ( - "runtime" - "strings" -) - -// IsOSSupported determines if an operating system is supported by the host. -func IsOSSupported(os string) bool { - return strings.EqualFold(runtime.GOOS, os) -} diff --git a/vendor/github.com/docker/docker/pkg/system/image_os_deprecated.go b/vendor/github.com/docker/docker/pkg/system/image_os_deprecated.go new file mode 100644 index 0000000000..afb57dae6a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/image_os_deprecated.go @@ -0,0 +1,19 @@ +package system + +import ( + "errors" + "runtime" + "strings" +) + +// ErrNotSupportedOperatingSystem means the operating system is not supported. +// +// Deprecated: use [github.com/docker/docker/image.CheckOS] and check the error returned. +var ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") + +// IsOSSupported determines if an operating system is supported by the host. +// +// Deprecated: use [github.com/docker/docker/image.CheckOS] and check the error returned. +func IsOSSupported(os string) bool { + return strings.EqualFold(runtime.GOOS, os) +} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go index 3c2a43ddbd..7603efbbd8 100644 --- a/vendor/github.com/docker/docker/pkg/system/init_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -1,9 +1,7 @@ package system // import "github.com/docker/docker/pkg/system" -var ( - // containerdRuntimeSupported determines if containerd should be the runtime. - containerdRuntimeSupported = false -) +// containerdRuntimeSupported determines if containerd should be the runtime. +var containerdRuntimeSupported = false // InitContainerdRuntime sets whether to use containerd for runtime on Windows. func InitContainerdRuntime(cdPath string) { diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go index 654b9f2c9e..5e29a6b3b8 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_deprecated.go b/vendor/github.com/docker/docker/pkg/system/meminfo_deprecated.go deleted file mode 100644 index 216519923e..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_deprecated.go +++ /dev/null @@ -1,16 +0,0 @@ -package system - -import "github.com/docker/docker/pkg/meminfo" - -// MemInfo contains memory statistics of the host system. 
-// -// Deprecated: use [meminfo.Memory]. -type MemInfo = meminfo.Memory - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -// -// Deprecated: use [meminfo.Read]. -func ReadMemInfo() (*meminfo.Memory, error) { - return meminfo.Read() -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go index d27152c0f5..2a62237a45 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go index c890be116f..e218e742d4 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go index 4586aad19e..34df0b9236 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go @@ -1,5 +1,4 @@ //go:build !freebsd && !windows -// +build !freebsd,!windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go deleted file mode 100644 index ec89d7a15e..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. -func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/vendor/github.com/docker/docker/pkg/system/path_deprecated.go b/vendor/github.com/docker/docker/pkg/system/path_deprecated.go deleted file mode 100644 index 5c95026c3d..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/path_deprecated.go +++ /dev/null @@ -1,18 +0,0 @@ -package system - -const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -// For Windows containers, an empty string is returned as the default -// path will be set by the container, and Docker has no context of what the -// default path should be. 
-// -// Deprecated: use oci.DefaultPathEnv -func DefaultPathEnv(os string) string { - if os == "windows" { - return "" - } - return defaultUnixPathEnv -} diff --git a/vendor/github.com/docker/docker/pkg/system/process_deprecated.go b/vendor/github.com/docker/docker/pkg/system/process_deprecated.go deleted file mode 100644 index 7b9f19acd5..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/process_deprecated.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build linux || freebsd || darwin || windows -// +build linux freebsd darwin windows - -package system - -import "github.com/docker/docker/pkg/process" - -var ( - // IsProcessAlive returns true if process with a given pid is running. - // - // Deprecated: use [process.Alive]. - IsProcessAlive = process.Alive - - // IsProcessZombie return true if process has a state with "Z" - // - // Deprecated: use [process.Zombie]. - // - // TODO(thaJeztah): remove the Windows implementation in process once we remove this stub. - IsProcessZombie = process.Zombie -) - -// KillProcess force-stops a process. -// -// Deprecated: use [process.Kill]. -func KillProcess(pid int) { - _ = process.Kill(pid) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go index 8e61d820f0..435b776ee3 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || netbsd -// +build freebsd netbsd package system // import "github.com/docker/docker/pkg/system" @@ -7,10 +6,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil + mtim: s.Mtimespec, + }, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go index c1c0ee9f38..e0b629df0e 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go @@ -4,10 +4,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil + mtim: s.Mtimespec, + }, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go index 3ac02393f0..4309d42b9f 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -4,13 +4,15 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: s.Mode, uid: s.Uid, gid: s.Gid, // the type is 32bit on mips rdev: uint64(s.Rdev), //nolint: unconvert - mtim: s.Mtim}, nil + mtim: s.Mtim, + }, nil } // FromStatT converts a syscall.Stat_t type to a system.Stat_t type diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go index 756b92d1e6..851374e5d9 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go +++ 
b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go @@ -4,10 +4,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil + mtim: s.Mtim, + }, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go index a45ffddf75..205e54677d 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go index 0ff3af2fa1..10876cd73e 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -45,5 +45,6 @@ func fromStatT(fi *os.FileInfo) (*StatT, error) { return &StatT{ size: (*fi).Size(), mode: (*fi).Mode(), - mtim: (*fi).ModTime()}, nil + mtim: (*fi).ModTime(), + }, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go index 2768750a00..f3a079f887 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go index bfed4af032..7c19d59156 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !freebsd -// +build !linux,!freebsd package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs.go b/vendor/github.com/docker/docker/pkg/system/xattrs.go new file mode 100644 index 0000000000..b3f4e8a21f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/xattrs.go @@ -0,0 +1,18 @@ +package system // import "github.com/docker/docker/pkg/system" + +type XattrError struct { + Op string + Attr string + Path string + Err error +} + +func (e *XattrError) Error() string { return e.Op + " " + e.Attr + " " + e.Path + ": " + e.Err.Error() } + +func (e *XattrError) Unwrap() error { return e.Err } + +// Timeout reports whether this error represents a timeout. +func (e *XattrError) Timeout() bool { + t, ok := e.Err.(interface{ Timeout() bool }) + return ok && t.Timeout() +} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go index 95b609fe7a..facfbb3126 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -1,11 +1,17 @@ package system // import "github.com/docker/docker/pkg/system" -import "golang.org/x/sys/unix" +import ( + "golang.org/x/sys/unix" +) // Lgetxattr retrieves the value of the extended attribute identified by attr // and associated with the given path in the file system. 
// It will returns a nil slice and nil error if the xattr is not set. func Lgetxattr(path string, attr string) ([]byte, error) { + sysErr := func(err error) ([]byte, error) { + return nil, &XattrError{Op: "lgetxattr", Attr: attr, Path: path, Err: err} + } + // Start with a 128 length byte array dest := make([]byte, 128) sz, errno := unix.Lgetxattr(path, attr, dest) @@ -14,7 +20,7 @@ func Lgetxattr(path string, attr string) ([]byte, error) { // Buffer too small, use zero-sized buffer to get the actual size sz, errno = unix.Lgetxattr(path, attr, []byte{}) if errno != nil { - return nil, errno + return sysErr(errno) } dest = make([]byte, sz) sz, errno = unix.Lgetxattr(path, attr, dest) @@ -24,7 +30,7 @@ func Lgetxattr(path string, attr string) ([]byte, error) { case errno == unix.ENODATA: return nil, nil case errno != nil: - return nil, errno + return sysErr(errno) } return dest[:sz], nil @@ -33,5 +39,9 @@ func Lgetxattr(path string, attr string) ([]byte, error) { // Lsetxattr sets the value of the extended attribute identified by attr // and associated with the given path in the file system. func Lsetxattr(path string, attr string, data []byte, flags int) error { - return unix.Lsetxattr(path, attr, data, flags) + err := unix.Lsetxattr(path, attr, data, flags) + if err != nil { + return &XattrError{Op: "lsetxattr", Attr: attr, Path: path, Err: err} + } + return nil } diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go index b165a5dbfe..2a3698f129 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go index 01d4ed59b2..33e07b4793 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go @@ -27,9 +27,11 @@ type fileInfoSum struct { func (fis fileInfoSum) Name() string { return fis.name } + func (fis fileInfoSum) Sum() string { return fis.sum } + func (fis fileInfoSum) Pos() int64 { return fis.pos } diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go index 87a8450aea..8bd2850f8e 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go @@ -137,13 +137,11 @@ type tHashConfig struct { hash crypto.Hash } -var ( - // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. - standardHashConfigs = map[string]tHashConfig{ - "sha256": {name: "sha256", hash: crypto.SHA256}, - "sha512": {name: "sha512", hash: crypto.SHA512}, - } -) +// NOTE: DO NOT include MD5 or SHA1, which are considered insecure. +var standardHashConfigs = map[string]tHashConfig{ + "sha256": {name: "sha256", hash: crypto.SHA256}, + "sha512": {name: "sha512", hash: crypto.SHA512}, +} // DefaultTHash is default TarSum hashing algorithm - "sha256". 
var DefaultTHash = NewTHash("sha256", sha256.New) diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go index edc3ec8cfd..890b70d455 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go @@ -115,15 +115,29 @@ func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { // Get extended attributes. - xAttrKeys := make([]string, len(h.Xattrs)) - for k := range h.Xattrs { - xAttrKeys = append(xAttrKeys, k) + const paxSchilyXattr = "SCHILY.xattr." + var xattrs [][2]string + for k, v := range h.PAXRecords { + if xattr, ok := strings.CutPrefix(k, paxSchilyXattr); ok { + // h.Xattrs keys take precedence over h.PAXRecords keys, like + // archive/tar does when writing. + if vv, ok := h.Xattrs[xattr]; ok { //nolint:staticcheck // field deprecated in stdlib + v = vv + } + xattrs = append(xattrs, [2]string{xattr, v}) + } } - sort.Strings(xAttrKeys) + // Get extended attributes which are not in PAXRecords. + for k, v := range h.Xattrs { //nolint:staticcheck // field deprecated in stdlib + if _, ok := h.PAXRecords[paxSchilyXattr+k]; !ok { + xattrs = append(xattrs, [2]string{k, v}) + } + } + sort.Slice(xattrs, func(i, j int) bool { return xattrs[i][0] < xattrs[j][0] }) // Make the slice with enough capacity to hold the 11 basic headers // we want from the v0 selector plus however many xattrs we have. - orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + orderedHeaders = make([][2]string, 0, 11+len(xattrs)) // Copy all headers from v0 excluding the 'mtime' header (the 5th element). v0headers := v0TarHeaderSelect(h) @@ -131,9 +145,7 @@ func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { orderedHeaders = append(orderedHeaders, v0headers[6:]...) // Finally, append the sorted xattrs. - for _, k := range xAttrKeys { - orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) - } + orderedHeaders = append(orderedHeaders, xattrs...) 
return } diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go index 0c86b88fc3..82f973ffc9 100644 --- a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go +++ b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go @@ -27,7 +27,7 @@ func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { } execRoot = filepath.Join(execRoot, p.PluginObj.ID) - if err := os.MkdirAll(execRoot, 0700); err != nil { + if err := os.MkdirAll(execRoot, 0o700); err != nil { return nil, errors.WithStack(err) } diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go index 1b08aec171..022a604b06 100644 --- a/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go +++ b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package v2 // import "github.com/docker/docker/plugin/v2" diff --git a/vendor/github.com/docker/docker/runconfig/config_unix.go b/vendor/github.com/docker/docker/runconfig/config_unix.go index 6b29397051..1ba361123f 100644 --- a/vendor/github.com/docker/docker/runconfig/config_unix.go +++ b/vendor/github.com/docker/docker/runconfig/config_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package runconfig // import "github.com/docker/docker/runconfig" diff --git a/vendor/github.com/docker/docker/runconfig/errors.go b/vendor/github.com/docker/docker/runconfig/errors.go index 9522a2e0f7..a0ca32a7b9 100644 --- a/vendor/github.com/docker/docker/runconfig/errors.go +++ b/vendor/github.com/docker/docker/runconfig/errors.go @@ -31,6 +31,8 @@ const ( ErrUnsupportedNetworkAndAlias validationError = "network-scoped alias is supported only for containers in user defined networks" // ErrConflictUTSHostname conflict between the hostname and the UTS mode ErrConflictUTSHostname validationError = "conflicting options: hostname and the UTS mode" + // ErrEmptyConfig when container config is nil + ErrEmptyConfig validationError = "config cannot be empty in order to create a container" ) type validationError string diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig.go b/vendor/github.com/docker/docker/runconfig/hostconfig.go index 9603a94eca..8a9e65f1a2 100644 --- a/vendor/github.com/docker/docker/runconfig/hostconfig.go +++ b/vendor/github.com/docker/docker/runconfig/hostconfig.go @@ -5,6 +5,7 @@ import ( "strings" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" ) // DecodeHostConfig creates a HostConfig based on the specified Reader. @@ -23,7 +24,7 @@ func decodeHostConfig(src io.Reader) (*container.HostConfig, error) { // docker daemon. 
func SetDefaultNetModeIfBlank(hc *container.HostConfig) { if hc != nil && hc.NetworkMode == "" { - hc.NetworkMode = "default" + hc.NetworkMode = network.NetworkDefault } } @@ -53,10 +54,6 @@ func validateNetContainerMode(c *container.Config, hc *container.HostConfig) err return ErrConflictNetworkHosts } - if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { - return ErrConflictContainerNetworkAndMac - } - if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts) { return ErrConflictNetworkPublishPorts } diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go index bbe7026349..0a60c9b805 100644 --- a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package runconfig // import "github.com/docker/docker/runconfig" @@ -8,13 +7,14 @@ import ( "runtime" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/pkg/sysinfo" ) // DefaultDaemonNetworkMode returns the default network stack the daemon should // use. func DefaultDaemonNetworkMode() container.NetworkMode { - return "bridge" + return network.NetworkBridge } // IsPreDefinedNetwork indicates if a network is predefined by the daemon diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go index 91e27eac5e..579b9787d6 100644 --- a/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go @@ -4,13 +4,14 @@ import ( "fmt" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/pkg/sysinfo" ) // DefaultDaemonNetworkMode returns the default network stack the daemon should // use. func DefaultDaemonNetworkMode() container.NetworkMode { - return "nat" + return network.NetworkNat } // IsPreDefinedNetwork indicates if a network is predefined by the daemon diff --git a/vendor/github.com/docker/docker/volume/mounts/linux_parser.go b/vendor/github.com/docker/docker/volume/mounts/linux_parser.go index 17bc3c9878..1b64c23935 100644 --- a/vendor/github.com/docker/docker/volume/mounts/linux_parser.go +++ b/vendor/github.com/docker/docker/volume/mounts/linux_parser.go @@ -30,6 +30,7 @@ func linuxValidateNotRoot(p string) error { } return nil } + func linuxValidateAbsolute(p string) error { p = strings.ReplaceAll(p, `\`, `/`) if path.IsAbs(p) { @@ -37,12 +38,14 @@ func linuxValidateAbsolute(p string) error { } return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) } + func (p *linuxParser) ValidateMountConfig(mnt *mount.Mount) error { // there was something looking like a bug in existing codebase: // - validateMountConfig on linux was called with options skipping bind source existence when calling ParseMountRaw // - but not when calling ParseMountSpec directly... 
nor when the unit test called it directly return p.validateMountConfigImpl(mnt, true) } + func (p *linuxParser) validateMountConfigImpl(mnt *mount.Mount, validateBindSourceExists bool) error { if len(mnt.Target) == 0 { return &errMountConfig{mnt, errMissingField("Target")} @@ -123,6 +126,7 @@ var linuxConsistencyModes = map[mount.Consistency]bool{ mount.ConsistencyCached: true, mount.ConsistencyDelegated: true, } + var linuxPropagationModes = map[mount.Propagation]bool{ mount.PropagationPrivate: true, mount.PropagationRPrivate: true, @@ -271,9 +275,11 @@ func (p *linuxParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, erro } return mp, err } + func (p *linuxParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { return p.parseMountSpec(cfg, true) } + func (p *linuxParser) parseMountSpec(cfg mount.Mount, validateBindSourceExists bool) (*MountPoint, error) { if err := p.validateMountConfigImpl(&cfg, validateBindSourceExists); err != nil { return nil, err @@ -394,6 +400,7 @@ func (p *linuxParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool func (p *linuxParser) DefaultCopyMode() bool { return true } + func (p *linuxParser) ValidateVolumeName(name string) error { return nil } diff --git a/vendor/github.com/docker/docker/volume/mounts/mounts.go b/vendor/github.com/docker/docker/volume/mounts/mounts.go index bc90bb9def..74caf015ff 100644 --- a/vendor/github.com/docker/docker/volume/mounts/mounts.go +++ b/vendor/github.com/docker/docker/volume/mounts/mounts.go @@ -7,14 +7,13 @@ import ( "path/filepath" "syscall" - "github.com/containerd/containerd/log" + "github.com/containerd/log" mounttypes "github.com/docker/docker/api/types/mount" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // MountPoint is the intersection point between a volume and a container. It @@ -62,7 +61,7 @@ type MountPoint struct { // This should be set by calls to `Mount` and unset by calls to `Unmount` ID string `json:",omitempty"` - // Sepc is a copy of the API request that created this mount. + // Spec is a copy of the API request that created this mount. Spec mounttypes.Mount // Some bind mounts should not be automatically created. 
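(Editor's illustrative aside, not part of the patch.) The mounts.go hunks here and in the next hunk swap the package-level logrus calls for the context-scoped logger from github.com/containerd/log, the same migration applied to the sysinfo files earlier in this bump. A minimal sketch of that pattern follows; it assumes the module's WithLogger/WithField helpers behave like their logrus counterparts, while log.G(ctx).Debug/Warn match the calls visible in the diff.

package main

import (
	"context"

	"github.com/containerd/log"
)

func main() {
	// Attach a logger carrying a field to the context; downstream code
	// retrieves it with log.G(ctx) instead of using a global logrus logger.
	base := log.G(context.Background()).WithField("component", "mounts")
	ctx := log.WithLogger(context.Background(), base)

	// Equivalent of the old logrus.Debug(...) call, now context-aware.
	log.G(ctx).Debug("No volume to restore")
}

Because the entry rides on the context, a field added once ("component" here) appears on every log call made with that context, which is why the vendored code can drop its direct logrus import.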
@@ -156,7 +155,7 @@ func (m *MountPoint) Setup(mountLabel string, rootIDs idtools.Identity, checkFun // idtools.MkdirAllNewAs() produces an error if m.Source exists and is a file (not a directory) // also, makes sure that if the directory is created, the correct remapped rootUID/rootGID will own it - if err := idtools.MkdirAllAndChownNew(m.Source, 0755, rootIDs); err != nil { + if err := idtools.MkdirAllAndChownNew(m.Source, 0o755, rootIDs); err != nil { if perr, ok := err.(*os.PathError); ok { if perr.Err != syscall.ENOTDIR { return "", errors.Wrapf(err, "error while creating mount source path '%s'", m.Source) @@ -169,7 +168,7 @@ func (m *MountPoint) Setup(mountLabel string, rootIDs idtools.Identity, checkFun func (m *MountPoint) LiveRestore(ctx context.Context) error { if m.Volume == nil { - logrus.Debug("No volume to restore") + log.G(ctx).Debug("No volume to restore") return nil } diff --git a/vendor/github.com/docker/docker/volume/mounts/parser.go b/vendor/github.com/docker/docker/volume/mounts/parser.go index 58107f490c..2bcf9ab053 100644 --- a/vendor/github.com/docker/docker/volume/mounts/parser.go +++ b/vendor/github.com/docker/docker/volume/mounts/parser.go @@ -14,7 +14,7 @@ var ErrVolumeTargetIsRoot = errors.New("invalid specification: destination can't // read-write modes var rwModes = map[string]bool{ "rw": true, - "ro": true, + "ro": true, // attempts recursive read-only if possible } // Parser represents a platform specific parser for mount expressions diff --git a/vendor/github.com/docker/docker/volume/mounts/validate.go b/vendor/github.com/docker/docker/volume/mounts/validate.go index 9fc9109021..f40438290d 100644 --- a/vendor/github.com/docker/docker/volume/mounts/validate.go +++ b/vendor/github.com/docker/docker/volume/mounts/validate.go @@ -23,6 +23,7 @@ func errBindSourceDoesNotExist(path string) error { func errExtraField(name string) error { return errors.Errorf("field %s must not be specified", name) } + func errMissingField(name string) error { return errors.Errorf("field %s must not be empty", name) } diff --git a/vendor/github.com/docker/docker/volume/mounts/volume_unix.go b/vendor/github.com/docker/docker/volume/mounts/volume_unix.go index 92eadce518..8556bbe0d2 100644 --- a/vendor/github.com/docker/docker/volume/mounts/volume_unix.go +++ b/vendor/github.com/docker/docker/volume/mounts/volume_unix.go @@ -1,5 +1,4 @@ //go:build linux || freebsd || darwin -// +build linux freebsd darwin package mounts // import "github.com/docker/docker/volume/mounts" diff --git a/vendor/github.com/docker/docker/volume/mounts/windows_parser.go b/vendor/github.com/docker/docker/volume/mounts/windows_parser.go index 7800aa8c08..f9f0f08f44 100644 --- a/vendor/github.com/docker/docker/volume/mounts/windows_parser.go +++ b/vendor/github.com/docker/docker/volume/mounts/windows_parser.go @@ -191,6 +191,7 @@ func (p *windowsParser) ValidateVolumeName(name string) error { } return nil } + func (p *windowsParser) ValidateMountConfig(mnt *mount.Mount) error { return p.validateMountConfigReg(mnt, windowsValidators) } @@ -199,8 +200,7 @@ type fileInfoProvider interface { fileInfo(path string) (exist, isDir bool, err error) } -type defaultFileInfoProvider struct { -} +type defaultFileInfoProvider struct{} func (defaultFileInfoProvider) fileInfo(path string) (exist, isDir bool, err error) { fi, err := os.Stat(path) diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index dde75389f1..43de486775 100644 --- 
a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,14 @@ This package provides various compression algorithms. # changelog +* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) + * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 + * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 + * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 + * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 + * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 + * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + * July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 @@ -646,6 +654,7 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv * [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. * [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. * [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index. +* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor. # license diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 9819d41453..858f8f43a5 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -197,12 +197,13 @@ encodeLoop: // Set m to a match at offset if it looks like that will improve compression. 
improve := func(m *match, offset int32, s int32, first uint32, rep int32) { - if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + delta := s - offset + if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { return } if debugAsserts { - if offset <= 0 { - panic(offset) + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) } if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) @@ -343,8 +344,8 @@ encodeLoop: if best.rep > 0 { var seq seq seq.matchLen = uint32(best.length - zstdMinMatch) - if debugAsserts && s <= nextEmit { - panic("s <= nextEmit") + if debugAsserts && s < nextEmit { + panic("s < nextEmit") } addLiterals(&seq, best.s) diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go index a527175b73..66e50d8aad 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go @@ -156,14 +156,7 @@ func (bf *BFlags) Parse() error { return nil } - arg = arg[2:] - value := "" - - index := strings.Index(arg, "=") - if index >= 0 { - value = arg[index+1:] - arg = arg[:index] - } + arg, value, hasValue := strings.Cut(arg[2:], "=") flag, ok := bf.flags[arg] if !ok { @@ -180,27 +173,27 @@ func (bf *BFlags) Parse() error { switch flag.flagType { case boolType: // value == "" is only ok if no "=" was specified - if index >= 0 && value == "" { + if hasValue && value == "" { return errors.Errorf("missing a value on flag: %s", arg) } - lower := strings.ToLower(value) - if lower == "" { + switch strings.ToLower(value) { + case "true", "": flag.Value = "true" - } else if lower == "true" || lower == "false" { - flag.Value = lower - } else { + case "false": + flag.Value = "false" + default: return errors.Errorf("expecting boolean value for flag %s, not: %s", arg, value) } case stringType: - if index < 0 { + if !hasValue { return errors.Errorf("missing a value on flag: %s", arg) } flag.Value = value case stringsType: - if index < 0 { + if !hasValue { return errors.Errorf("missing a value on flag: %s", arg) } flag.StringValues = append(flag.StringValues, value) diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go index e328b27bc7..34e8fcc91d 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go @@ -11,13 +11,17 @@ import ( "github.com/pkg/errors" ) -const MountTypeBind = "bind" -const MountTypeCache = "cache" -const MountTypeTmpfs = "tmpfs" -const MountTypeSecret = "secret" -const MountTypeSSH = "ssh" +type MountType string + +const ( + MountTypeBind MountType = "bind" + MountTypeCache MountType = "cache" + MountTypeTmpfs MountType = "tmpfs" + MountTypeSecret MountType = "secret" + MountTypeSSH MountType = "ssh" +) -var allowedMountTypes = map[string]struct{}{ +var allowedMountTypes = map[MountType]struct{}{ MountTypeBind: {}, MountTypeCache: {}, MountTypeTmpfs: {}, @@ -25,11 +29,15 @@ var allowedMountTypes = map[string]struct{}{ MountTypeSSH: {}, } -const MountSharingShared = "shared" -const MountSharingPrivate = 
"private" -const MountSharingLocked = "locked" +type ShareMode string + +const ( + MountSharingShared ShareMode = "shared" + MountSharingPrivate ShareMode = "private" + MountSharingLocked ShareMode = "locked" +) -var allowedSharingTypes = map[string]struct{}{ +var allowedSharingModes = map[ShareMode]struct{}{ MountSharingShared: {}, MountSharingPrivate: {}, MountSharingLocked: {}, @@ -44,31 +52,18 @@ func init() { parseRunPostHooks = append(parseRunPostHooks, runMountPostHook) } -func isValidMountType(s string) bool { - if s == "secret" { - if !isSecretMountsSupported() { - return false - } +func allShareModes() []string { + types := make([]string, 0, len(allowedSharingModes)) + for k := range allowedSharingModes { + types = append(types, string(k)) } - if s == "ssh" { - if !isSSHMountsSupported() { - return false - } - } - _, ok := allowedMountTypes[s] - return ok + return types } func allMountTypes() []string { - types := make([]string, 0, len(allowedMountTypes)+2) + types := make([]string, 0, len(allowedMountTypes)) for k := range allowedMountTypes { - types = append(types, k) - } - if isSecretMountsSupported() { - types = append(types, "secret") - } - if isSSHMountsSupported() { - types = append(types, "ssh") + types = append(types, string(k)) } return types } @@ -119,22 +114,22 @@ type mountState struct { } type Mount struct { - Type string + Type MountType From string Source string Target string ReadOnly bool SizeLimit int64 CacheID string - CacheSharing string + CacheSharing ShareMode Required bool Mode *uint64 UID *uint64 GID *uint64 } -func parseMount(value string, expander SingleWordExpander) (*Mount, error) { - csvReader := csv.NewReader(strings.NewReader(value)) +func parseMount(val string, expander SingleWordExpander) (*Mount, error) { + csvReader := csv.NewReader(strings.NewReader(val)) fields, err := csvReader.Read() if err != nil { return nil, errors.Wrap(err, "failed to parse csv mounts") @@ -145,10 +140,10 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { roAuto := true for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) + key, value, ok := strings.Cut(field, "=") + key = strings.ToLower(key) - if len(parts) == 1 { + if !ok { if expander == nil { continue // evaluate later } @@ -162,27 +157,24 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { roAuto = false continue case "required": - if m.Type == "secret" || m.Type == "ssh" { + if m.Type == MountTypeSecret || m.Type == MountTypeSSH { m.Required = true continue } else { return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) } + default: + // any other option requires a value. 
+ return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) } } - if len(parts) != 2 { - return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] // check for potential variable if expander != nil { - processed, err := expander(value) + value, err = expander(value) if err != nil { return nil, err } - value = processed } else if key == "from" { if matched, err := regexp.MatchString(`\$.`, value); err != nil { //nolint return nil, err @@ -196,10 +188,11 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { switch key { case "type": - if !isValidMountType(strings.ToLower(value)) { + v := MountType(strings.ToLower(value)) + if _, ok := allowedMountTypes[v]; !ok { return nil, suggest.WrapError(errors.Errorf("unsupported mount type %q", value), value, allMountTypes(), true) } - m.Type = strings.ToLower(value) + m.Type = v case "from": m.From = value case "source", "src": @@ -220,17 +213,16 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { m.ReadOnly = !rw roAuto = false case "required": - if m.Type == "secret" || m.Type == "ssh" { - v, err := strconv.ParseBool(value) + if m.Type == MountTypeSecret || m.Type == MountTypeSSH { + m.Required, err = strconv.ParseBool(value) if err != nil { return nil, errors.Errorf("invalid value for %s: %s", key, value) } - m.Required = v } else { return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) } case "size": - if m.Type == "tmpfs" { + if m.Type == MountTypeTmpfs { m.SizeLimit, err = units.RAMInBytes(value) if err != nil { return nil, errors.Errorf("invalid value for %s: %s", key, value) @@ -241,10 +233,11 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { case "id": m.CacheID = value case "sharing": - if _, ok := allowedSharingTypes[strings.ToLower(value)]; !ok { - return nil, errors.Errorf("unsupported sharing value %q", value) + v := ShareMode(strings.ToLower(value)) + if _, ok := allowedSharingModes[v]; !ok { + return nil, suggest.WrapError(errors.Errorf("unsupported sharing value %q", value), value, allShareModes(), true) } - m.CacheSharing = strings.ToLower(value) + m.CacheSharing = v case "mode": mode, err := strconv.ParseUint(value, 8, 32) if err != nil { @@ -273,16 +266,16 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { fileInfoAllowed := m.Type == MountTypeSecret || m.Type == MountTypeSSH || m.Type == MountTypeCache - if m.Mode != nil && !fileInfoAllowed { - return nil, errors.Errorf("mode not allowed for %q type mounts", m.Type) - } - - if m.UID != nil && !fileInfoAllowed { - return nil, errors.Errorf("uid not allowed for %q type mounts", m.Type) - } - - if m.GID != nil && !fileInfoAllowed { - return nil, errors.Errorf("gid not allowed for %q type mounts", m.Type) + if !fileInfoAllowed { + if m.Mode != nil { + return nil, errors.Errorf("mode not allowed for %q type mounts", m.Type) + } + if m.UID != nil { + return nil, errors.Errorf("uid not allowed for %q type mounts", m.Type) + } + if m.GID != nil { + return nil, errors.Errorf("gid not allowed for %q type mounts", m.Type) + } } if roAuto { @@ -293,10 +286,6 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { } } - if m.CacheSharing != "" && m.Type != MountTypeCache { - return nil, errors.Errorf("invalid cache sharing set for %v mount", m.Type) - } - if m.Type == MountTypeSecret { if m.From != "" { return nil, errors.Errorf("secret mount should not have a 
from") @@ -312,5 +301,9 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { } } + if m.CacheSharing != "" && m.Type != MountTypeCache { + return nil, errors.Errorf("invalid cache sharing set for %v mount", m.Type) + } + return m, nil } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go index 142c3075b5..0ced44dae6 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go @@ -4,13 +4,15 @@ import ( "github.com/pkg/errors" ) +type NetworkMode = string + const ( - NetworkDefault = "default" - NetworkNone = "none" - NetworkHost = "host" + NetworkDefault NetworkMode = "default" + NetworkNone NetworkMode = "none" + NetworkHost NetworkMode = "host" ) -var allowedNetwork = map[string]struct{}{ +var allowedNetwork = map[NetworkMode]struct{}{ NetworkDefault: {}, NetworkNone: {}, NetworkHost: {}, @@ -51,7 +53,7 @@ func runNetworkPostHook(cmd *RunCommand, req parseRequest) error { return nil } -func GetNetwork(cmd *RunCommand) string { +func GetNetwork(cmd *RunCommand) NetworkMode { return cmd.getExternalValue(networkKey).(*networkState).networkMode } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go deleted file mode 100644 index 2b4140b72a..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go +++ /dev/null @@ -1,5 +0,0 @@ -package instructions - -func isSecretMountsSupported() bool { - return true -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go deleted file mode 100644 index 0e4e5f38c7..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go +++ /dev/null @@ -1,5 +0,0 @@ -package instructions - -func isSSHMountsSupported() bool { - return true -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go index 6c362fc6fa..5e03f84243 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go @@ -357,12 +357,13 @@ func parseFrom(req parseRequest) (*Stage, error) { }, nil } -func parseBuildStageName(args []string) (string, error) { - stageName := "" +var validStageName = regexp.MustCompile("^[a-z][a-z0-9-_.]*$") + +func parseBuildStageName(args []string) (stageName string, err error) { switch { case len(args) == 3 && strings.EqualFold(args[1], "as"): stageName = strings.ToLower(args[2]) - if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok { + if !validStageName.MatchString(stageName) { return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", args[2]) } case len(args) != 1: @@ -543,6 +544,7 @@ func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) { flInterval := req.flags.AddString("interval", "") flTimeout := req.flags.AddString("timeout", "") flStartPeriod := req.flags.AddString("start-period", "") + flStartInterval := 
req.flags.AddString("start-interval", "") flRetries := req.flags.AddString("retries", "") if err := req.flags.Parse(); err != nil { @@ -583,6 +585,12 @@ func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) { } healthcheck.StartPeriod = startPeriod + startInterval, err := parseOptInterval(flStartInterval) + if err != nil { + return nil, err + } + healthcheck.StartInterval = startInterval + if flRetries.Value != "" { retries, err := strconv.ParseInt(flRetries.Value, 10, 32) if err != nil { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go index d6723635d4..4a6129fdc8 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go @@ -49,8 +49,7 @@ func (node *Node) Location() []Range { // Dump dumps the AST defined by `node` as a list of sexps. // Returns a string suitable for printing. func (node *Node) Dump() string { - str := "" - str += strings.ToLower(node.Value) + str := strings.ToLower(node.Value) if len(node.Flags) > 0 { str += fmt.Sprintf(" %q", node.Flags) diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go index bf0887f236..f9aca5d9ef 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go @@ -4,8 +4,8 @@ package shell // EqualEnvKeys compare two strings and returns true if they are equal. -// On Unix this comparison is case sensitive. -// On Windows this comparison is case insensitive. +// On Unix this comparison is case-sensitive. +// On Windows this comparison is case-insensitive. func EqualEnvKeys(from, to string) bool { return from == to } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go index 010569bbaa..7bbed9b207 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go @@ -3,8 +3,8 @@ package shell import "strings" // EqualEnvKeys compare two strings and returns true if they are equal. -// On Unix this comparison is case sensitive. -// On Windows this comparison is case insensitive. +// On Unix this comparison is case-sensitive. +// On Windows this comparison is case-insensitive. 
func EqualEnvKeys(from, to string) bool { - return strings.ToUpper(from) == strings.ToUpper(to) + return strings.EqualFold(from, to) } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go index b930ab3260..80806f8ba7 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go @@ -335,39 +335,23 @@ func (sw *shellWord) processDollar() (string, error) { } name := sw.processName() ch := sw.scanner.Next() + chs := string(ch) + nullIsUnset := false + switch ch { case '}': // Normal ${xx} case - value, found := sw.getEnv(name) - if !found && sw.skipUnsetEnv { + value, set := sw.getEnv(name) + if !set && sw.skipUnsetEnv { return fmt.Sprintf("${%s}", name), nil } return value, nil - case '?': - word, _, err := sw.processStopOn('}') - if err != nil { - if sw.scanner.Peek() == scanner.EOF { - return "", errors.New("syntax error: missing '}'") - } - return "", err - } - newValue, found := sw.getEnv(name) - if !found { - if sw.skipUnsetEnv { - return fmt.Sprintf("${%s?%s}", name, word), nil - } - message := "is not allowed to be unset" - if word != "" { - message = word - } - return "", errors.Errorf("%s: %s", name, message) - } - return newValue, nil case ':': - // Special ${xx:...} format processing - // Yes it allows for recursive $'s in the ... spot - modifier := sw.scanner.Next() - + nullIsUnset = true + ch = sw.scanner.Next() + chs += string(ch) + fallthrough + case '+', '-', '?': word, _, err := sw.processStopOn('}') if err != nil { if sw.scanner.Peek() == scanner.EOF { @@ -378,53 +362,44 @@ func (sw *shellWord) processDollar() (string, error) { // Grab the current value of the variable in question so we // can use it to determine what to do based on the modifier - newValue, found := sw.getEnv(name) - - switch modifier { - case '+': - if newValue != "" { - newValue = word - } - if !found && sw.skipUnsetEnv { - return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil - } - return newValue, nil + value, set := sw.getEnv(name) + if sw.skipUnsetEnv && !set { + return fmt.Sprintf("${%s%s%s}", name, chs, word), nil + } + switch ch { case '-': - if newValue == "" { - newValue = word + if !set || (nullIsUnset && value == "") { + return word, nil } - if !found && sw.skipUnsetEnv { - return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil + return value, nil + case '+': + if !set || (nullIsUnset && value == "") { + return "", nil } - - return newValue, nil - + return word, nil case '?': - if !found { - if sw.skipUnsetEnv { - return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil - } + if !set { message := "is not allowed to be unset" if word != "" { message = word } return "", errors.Errorf("%s: %s", name, message) } - if newValue == "" { + if nullIsUnset && value == "" { message := "is not allowed to be empty" if word != "" { message = word } return "", errors.Errorf("%s: %s", name, message) } - return newValue, nil - + return value, nil default: - return "", errors.Errorf("unsupported modifier (%c) in substitution", modifier) + return "", errors.Errorf("unsupported modifier (%s) in substitution", chs) } + default: + return "", errors.Errorf("unsupported modifier (%s) in substitution", chs) } - return "", errors.Errorf("missing ':' in substitution") } func (sw *shellWord) processName() string { diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.go 
b/vendor/github.com/moby/buildkit/util/stack/stack.go index 18d03630b4..fb9fc3ddf5 100644 --- a/vendor/github.com/moby/buildkit/util/stack/stack.go +++ b/vendor/github.com/moby/buildkit/util/stack/stack.go @@ -9,7 +9,7 @@ import ( "strings" "sync" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" "github.com/pkg/errors" ) diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go index c4a73a68f4..43809d4876 100644 --- a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go +++ b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.11.4 // source: stack.proto diff --git a/vendor/github.com/opencontainers/runc/LICENSE b/vendor/github.com/opencontainers/runc/LICENSE deleted file mode 100644 index 27448585ad..0000000000 --- a/vendor/github.com/opencontainers/runc/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/NOTICE b/vendor/github.com/opencontainers/runc/NOTICE deleted file mode 100644 index 5c97abce4b..0000000000 --- a/vendor/github.com/opencontainers/runc/NOTICE +++ /dev/null @@ -1,17 +0,0 @@ -runc - -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (http://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see http://www.bis.doc.gov - -See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go deleted file mode 100644 index f95c1409fc..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go +++ /dev/null @@ -1,157 +0,0 @@ -//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package user - -import ( - "io" - "os" - "strconv" - - "golang.org/x/sys/unix" -) - -// Unix-specific path to the passwd and group formatted files. -const ( - unixPasswdPath = "/etc/passwd" - unixGroupPath = "/etc/group" -) - -// LookupUser looks up a user by their username in /etc/passwd. 
If the user -// cannot be found (or there is no /etc/passwd file on the filesystem), then -// LookupUser returns an error. -func LookupUser(username string) (User, error) { - return lookupUserFunc(func(u User) bool { - return u.Name == username - }) -} - -// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot -// be found (or there is no /etc/passwd file on the filesystem), then LookupId -// returns an error. -func LookupUid(uid int) (User, error) { - return lookupUserFunc(func(u User) bool { - return u.Uid == uid - }) -} - -func lookupUserFunc(filter func(u User) bool) (User, error) { - // Get operating system-specific passwd reader-closer. - passwd, err := GetPasswd() - if err != nil { - return User{}, err - } - defer passwd.Close() - - // Get the users. - users, err := ParsePasswdFilter(passwd, filter) - if err != nil { - return User{}, err - } - - // No user entries found. - if len(users) == 0 { - return User{}, ErrNoPasswdEntries - } - - // Assume the first entry is the "correct" one. - return users[0], nil -} - -// LookupGroup looks up a group by its name in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGroup -// returns an error. -func LookupGroup(groupname string) (Group, error) { - return lookupGroupFunc(func(g Group) bool { - return g.Name == groupname - }) -} - -// LookupGid looks up a group by its group id in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGid -// returns an error. -func LookupGid(gid int) (Group, error) { - return lookupGroupFunc(func(g Group) bool { - return g.Gid == gid - }) -} - -func lookupGroupFunc(filter func(g Group) bool) (Group, error) { - // Get operating system-specific group reader-closer. - group, err := GetGroup() - if err != nil { - return Group{}, err - } - defer group.Close() - - // Get the users. - groups, err := ParseGroupFilter(group, filter) - if err != nil { - return Group{}, err - } - - // No user entries found. - if len(groups) == 0 { - return Group{}, ErrNoGroupEntries - } - - // Assume the first entry is the "correct" one. - return groups[0], nil -} - -func GetPasswdPath() (string, error) { - return unixPasswdPath, nil -} - -func GetPasswd() (io.ReadCloser, error) { - return os.Open(unixPasswdPath) -} - -func GetGroupPath() (string, error) { - return unixGroupPath, nil -} - -func GetGroup() (io.ReadCloser, error) { - return os.Open(unixGroupPath) -} - -// CurrentUser looks up the current user by their user id in /etc/passwd. If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(unix.Getuid()) -} - -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/passwd. If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. 
-func CurrentGroup() (Group, error) { - return LookupGid(unix.Getgid()) -} - -func currentUserSubIDs(fileName string) ([]SubID, error) { - u, err := CurrentUser() - if err != nil { - return nil, err - } - filter := func(entry SubID) bool { - return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid) - } - return ParseSubIDFileFilter(fileName, filter) -} - -func CurrentUserSubUIDs() ([]SubID, error) { - return currentUserSubIDs("/etc/subuid") -} - -func CurrentUserSubGIDs() ([]SubID, error) { - return currentUserSubIDs("/etc/subgid") -} - -func CurrentProcessUIDMap() ([]IDMap, error) { - return ParseIDMapFile("/proc/self/uid_map") -} - -func CurrentProcessGIDMap() ([]IDMap, error) { - return ParseIDMapFile("/proc/self/gid_map") -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go deleted file mode 100644 index 984466d1ab..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ /dev/null @@ -1,605 +0,0 @@ -package user - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -const ( - minID = 0 - maxID = 1<<31 - 1 // for 32-bit systems compatibility -) - -var ( - // ErrNoPasswdEntries is returned if no matching entries were found in /etc/group. - ErrNoPasswdEntries = errors.New("no matching entries in passwd file") - // ErrNoGroupEntries is returned if no matching entries were found in /etc/passwd. - ErrNoGroupEntries = errors.New("no matching entries in group file") - // ErrRange is returned if a UID or GID is outside of the valid range. - ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minID, maxID) -) - -type User struct { - Name string - Pass string - Uid int - Gid int - Gecos string - Home string - Shell string -} - -type Group struct { - Name string - Pass string - Gid int - List []string -} - -// SubID represents an entry in /etc/sub{u,g}id -type SubID struct { - Name string - SubID int64 - Count int64 -} - -// IDMap represents an entry in /proc/PID/{u,g}id_map -type IDMap struct { - ID int64 - ParentID int64 - Count int64 -} - -func parseLine(line []byte, v ...interface{}) { - parseParts(bytes.Split(line, []byte(":")), v...) -} - -func parseParts(parts [][]byte, v ...interface{}) { - if len(parts) == 0 { - return - } - - for i, p := range parts { - // Ignore cases where we don't have enough fields to populate the arguments. - // Some configuration files like to misbehave. - if len(v) <= i { - break - } - - // Use the type of the argument to figure out how to parse it, scanf() style. - // This is legit. - switch e := v[i].(type) { - case *string: - *e = string(p) - case *int: - // "numbers", with conversion errors ignored because of some misbehaving configuration files. - *e, _ = strconv.Atoi(string(p)) - case *int64: - *e, _ = strconv.ParseInt(string(p), 10, 64) - case *[]string: - // Comma-separated lists. - if len(p) != 0 { - *e = strings.Split(string(p), ",") - } else { - *e = []string{} - } - default: - // Someone goof'd when writing code using this function. Scream so they can hear us. - panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! 
%#v is not a pointer!", e)) - } - } -} - -func ParsePasswdFile(path string) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswd(passwd) -} - -func ParsePasswd(passwd io.Reader) ([]User, error) { - return ParsePasswdFilter(passwd, nil) -} - -func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswdFilter(passwd, filter) -} - -func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { - if r == nil { - return nil, errors.New("nil source for passwd-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []User{} - ) - - for s.Scan() { - line := bytes.TrimSpace(s.Bytes()) - if len(line) == 0 { - continue - } - - // see: man 5 passwd - // name:password:UID:GID:GECOS:directory:shell - // Name:Pass:Uid:Gid:Gecos:Home:Shell - // root:x:0:0:root:/root:/bin/bash - // adm:x:3:4:adm:/var/adm:/bin/false - p := User{} - parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} - -func ParseGroupFile(path string) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - - defer group.Close() - return ParseGroup(group) -} - -func ParseGroup(group io.Reader) ([]Group, error) { - return ParseGroupFilter(group, nil) -} - -func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroupFilter(group, filter) -} - -func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { - if r == nil { - return nil, errors.New("nil source for group-formatted data") - } - rd := bufio.NewReader(r) - out := []Group{} - - // Read the file line-by-line. - for { - var ( - isPrefix bool - wholeLine []byte - err error - ) - - // Read the next line. We do so in chunks (as much as reader's - // buffer is able to keep), check if we read enough columns - // already on each step and store final result in wholeLine. - for { - var line []byte - line, isPrefix, err = rd.ReadLine() - - if err != nil { - // We should return no error if EOF is reached - // without a match. - if err == io.EOF { - err = nil - } - return out, err - } - - // Simple common case: line is short enough to fit in a - // single reader's buffer. - if !isPrefix && len(wholeLine) == 0 { - wholeLine = line - break - } - - wholeLine = append(wholeLine, line...) - - // Check if we read the whole line already. - if !isPrefix { - break - } - } - - // There's no spec for /etc/passwd or /etc/group, but we try to follow - // the same rules as the glibc parser, which allows comments and blank - // space at the beginning of a line. - wholeLine = bytes.TrimSpace(wholeLine) - if len(wholeLine) == 0 || wholeLine[0] == '#' { - continue - } - - // see: man 5 group - // group_name:password:GID:user_list - // Name:Pass:Gid:List - // root:x:0:root - // adm:x:4:root,adm,daemon - p := Group{} - parseLine(wholeLine, &p.Name, &p.Pass, &p.Gid, &p.List) - - if filter == nil || filter(p) { - out = append(out, p) - } - } -} - -type ExecUser struct { - Uid int - Gid int - Sgids []int - Home string -} - -// GetExecUserPath is a wrapper for GetExecUser. 
It reads data from each of the -// given file paths and uses that data as the arguments to GetExecUser. If the -// files cannot be opened for any reason, the error is ignored and a nil -// io.Reader is passed instead. -func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { - var passwd, group io.Reader - - if passwdFile, err := os.Open(passwdPath); err == nil { - passwd = passwdFile - defer passwdFile.Close() - } - - if groupFile, err := os.Open(groupPath); err == nil { - group = groupFile - defer groupFile.Close() - } - - return GetExecUser(userSpec, defaults, passwd, group) -} - -// GetExecUser parses a user specification string (using the passwd and group -// readers as sources for /etc/passwd and /etc/group data, respectively). In -// the case of blank fields or missing data from the sources, the values in -// defaults is used. -// -// GetExecUser will return an error if a user or group literal could not be -// found in any entry in passwd and group respectively. -// -// Examples of valid user specifications are: -// - "" -// - "user" -// - "uid" -// - "user:group" -// - "uid:gid -// - "user:gid" -// - "uid:group" -// -// It should be noted that if you specify a numeric user or group id, they will -// not be evaluated as usernames (only the metadata will be filled). So attempting -// to parse a user with user.Name = "1337" will produce the user with a UID of -// 1337. -func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { - if defaults == nil { - defaults = new(ExecUser) - } - - // Copy over defaults. - user := &ExecUser{ - Uid: defaults.Uid, - Gid: defaults.Gid, - Sgids: defaults.Sgids, - Home: defaults.Home, - } - - // Sgids slice *cannot* be nil. - if user.Sgids == nil { - user.Sgids = []int{} - } - - // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax - var userArg, groupArg string - parseLine([]byte(userSpec), &userArg, &groupArg) - - // Convert userArg and groupArg to be numeric, so we don't have to execute - // Atoi *twice* for each iteration over lines. - uidArg, uidErr := strconv.Atoi(userArg) - gidArg, gidErr := strconv.Atoi(groupArg) - - // Find the matching user. - users, err := ParsePasswdFilter(passwd, func(u User) bool { - if userArg == "" { - // Default to current state of the user. - return u.Uid == user.Uid - } - - if uidErr == nil { - // If the userArg is numeric, always treat it as a UID. - return uidArg == u.Uid - } - - return u.Name == userArg - }) - - // If we can't find the user, we have to bail. - if err != nil && passwd != nil { - if userArg == "" { - userArg = strconv.Itoa(user.Uid) - } - return nil, fmt.Errorf("unable to find user %s: %w", userArg, err) - } - - var matchedUserName string - if len(users) > 0 { - // First match wins, even if there's more than one matching entry. - matchedUserName = users[0].Name - user.Uid = users[0].Uid - user.Gid = users[0].Gid - user.Home = users[0].Home - } else if userArg != "" { - // If we can't find a user with the given username, the only other valid - // option is if it's a numeric username with no associated entry in passwd. - - if uidErr != nil { - // Not numeric. - return nil, fmt.Errorf("unable to find user %s: %w", userArg, ErrNoPasswdEntries) - } - user.Uid = uidArg - - // Must be inside valid uid range. - if user.Uid < minID || user.Uid > maxID { - return nil, ErrRange - } - - // Okay, so it's numeric. We can just roll with this. - } - - // On to the groups. 
If we matched a username, we need to do this because of - // the supplementary group IDs. - if groupArg != "" || matchedUserName != "" { - groups, err := ParseGroupFilter(group, func(g Group) bool { - // If the group argument isn't explicit, we'll just search for it. - if groupArg == "" { - // Check if user is a member of this group. - for _, u := range g.List { - if u == matchedUserName { - return true - } - } - return false - } - - if gidErr == nil { - // If the groupArg is numeric, always treat it as a GID. - return gidArg == g.Gid - } - - return g.Name == groupArg - }) - if err != nil && group != nil { - return nil, fmt.Errorf("unable to find groups for spec %v: %w", matchedUserName, err) - } - - // Only start modifying user.Gid if it is in explicit form. - if groupArg != "" { - if len(groups) > 0 { - // First match wins, even if there's more than one matching entry. - user.Gid = groups[0].Gid - } else { - // If we can't find a group with the given name, the only other valid - // option is if it's a numeric group name with no associated entry in group. - - if gidErr != nil { - // Not numeric. - return nil, fmt.Errorf("unable to find group %s: %w", groupArg, ErrNoGroupEntries) - } - user.Gid = gidArg - - // Must be inside valid gid range. - if user.Gid < minID || user.Gid > maxID { - return nil, ErrRange - } - - // Okay, so it's numeric. We can just roll with this. - } - } else if len(groups) > 0 { - // Supplementary group ids only make sense if in the implicit form. - user.Sgids = make([]int, len(groups)) - for i, group := range groups { - user.Sgids[i] = group.Gid - } - } - } - - return user, nil -} - -// GetAdditionalGroups looks up a list of groups by name or group id -// against the given /etc/group formatted data. If a group name cannot -// be found, an error will be returned. If a group id cannot be found, -// or the given group data is nil, the id will be returned as-is -// provided it is in the legal range. -func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { - groups := []Group{} - if group != nil { - var err error - groups, err = ParseGroupFilter(group, func(g Group) bool { - for _, ag := range additionalGroups { - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - return true - } - } - return false - }) - if err != nil { - return nil, fmt.Errorf("Unable to find additional groups %v: %w", additionalGroups, err) - } - } - - gidMap := make(map[int]struct{}) - for _, ag := range additionalGroups { - var found bool - for _, g := range groups { - // if we found a matched group either by name or gid, take the - // first matched as correct - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - if _, ok := gidMap[g.Gid]; !ok { - gidMap[g.Gid] = struct{}{} - found = true - break - } - } - } - // we asked for a group but didn't find it. let's check to see - // if we wanted a numeric group - if !found { - gid, err := strconv.ParseInt(ag, 10, 64) - if err != nil { - // Not a numeric ID either. - return nil, fmt.Errorf("Unable to find group %s: %w", ag, ErrNoGroupEntries) - } - // Ensure gid is inside gid range. - if gid < minID || gid > maxID { - return nil, ErrRange - } - gidMap[int(gid)] = struct{}{} - } - } - gids := []int{} - for gid := range gidMap { - gids = append(gids, gid) - } - return gids, nil -} - -// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups -// that opens the groupPath given and gives it as an argument to -// GetAdditionalGroups. 
-func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { - var group io.Reader - - if groupFile, err := os.Open(groupPath); err == nil { - group = groupFile - defer groupFile.Close() - } - return GetAdditionalGroups(additionalGroups, group) -} - -func ParseSubIDFile(path string) ([]SubID, error) { - subid, err := os.Open(path) - if err != nil { - return nil, err - } - defer subid.Close() - return ParseSubID(subid) -} - -func ParseSubID(subid io.Reader) ([]SubID, error) { - return ParseSubIDFilter(subid, nil) -} - -func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) { - subid, err := os.Open(path) - if err != nil { - return nil, err - } - defer subid.Close() - return ParseSubIDFilter(subid, filter) -} - -func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { - if r == nil { - return nil, errors.New("nil source for subid-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []SubID{} - ) - - for s.Scan() { - line := bytes.TrimSpace(s.Bytes()) - if len(line) == 0 { - continue - } - - // see: man 5 subuid - p := SubID{} - parseLine(line, &p.Name, &p.SubID, &p.Count) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} - -func ParseIDMapFile(path string) ([]IDMap, error) { - r, err := os.Open(path) - if err != nil { - return nil, err - } - defer r.Close() - return ParseIDMap(r) -} - -func ParseIDMap(r io.Reader) ([]IDMap, error) { - return ParseIDMapFilter(r, nil) -} - -func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) { - r, err := os.Open(path) - if err != nil { - return nil, err - } - defer r.Close() - return ParseIDMapFilter(r, filter) -} - -func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { - if r == nil { - return nil, errors.New("nil source for idmap-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []IDMap{} - ) - - for s.Scan() { - line := bytes.TrimSpace(s.Bytes()) - if len(line) == 0 { - continue - } - - // see: man 7 user_namespaces - p := IDMap{} - parseParts(bytes.Fields(line), &p.ID, &p.ParentID, &p.Count) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go deleted file mode 100644 index e018eae614..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -package user - -import ( - "io" - "strings" -) - -func IsDivisbleBy(n int, divisibleby int) bool { - return (n % divisibleby) == 0 -} - -func FuzzUser(data []byte) int { - if len(data) == 0 { - return -1 - } - if !IsDivisbleBy(len(data), 5) { - return -1 - } - - var divided [][]byte - - chunkSize := len(data) / 5 - - for i := 0; i < len(data); i += chunkSize { - end := i + chunkSize - - divided = append(divided, data[i:end]) - } - - _, _ = ParsePasswdFilter(strings.NewReader(string(divided[0])), nil) - - var passwd, group io.Reader - - group = strings.NewReader(string(divided[1])) - _, _ = GetAdditionalGroups([]string{string(divided[2])}, group) - - passwd = strings.NewReader(string(divided[3])) - _, _ = GetExecUser(string(divided[4]), nil, passwd, group) - return 1 -} diff --git 
a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 7657f841d6..f4fc884552 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -115,32 +115,28 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader - p TextParser - fams []*dto.MetricFamily + fams map[string]*dto.MetricFamily + err error } // Decode implements the Decoder interface. func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. - fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) + if d.err == nil { + // Read all metrics in one shot. + var p TextParser + d.fams, d.err = p.TextToMetricFamilies(d.r) + // If we don't get an error, store io.EOF for the end. + if d.err == nil { + d.err = io.EOF } } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil + // Pick off one MetricFamily per Decode until there's nothing left. + for key, fam := range d.fams { + *v = *fam + delete(d.fams, key) + return nil + } + return d.err } // SampleDecoder wraps a Decoder to extract samples from the metric families diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index f819e4f8b5..dfac962a4e 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -21,8 +21,8 @@ import "bytes" // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz // // Further input samples should go in the folder fuzz/corpus. func Fuzz(in []byte) int { diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 9d94ae9eff..21cdddcf05 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -46,20 +46,20 @@ import ( // missing features and peculiarities to avoid complications when switching from // Prometheus to OpenMetrics or vice versa: // -// - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, -// its type will be set to `unknown` in that case to avoid invalid OpenMetrics -// output. +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. // -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. 
+// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. // -// - The size of exemplar labels is not checked (i.e. it's possible to create -// exemplars that are larger than allowed by the OpenMetrics specification). +// - The size of exemplar labels is not checked (i.e. it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). // -// - The value of Counters is not checked. (OpenMetrics doesn't allow counters -// with a `NaN` value.) +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { name := in.GetName() if name == "" { diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 5ba503b065..2946b8f1a6 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -17,7 +17,6 @@ import ( "bufio" "fmt" "io" - "io/ioutil" "math" "strconv" "strings" @@ -44,7 +43,7 @@ const ( var ( bufPool = sync.Pool{ New: func() interface{} { - return bufio.NewWriter(ioutil.Discard) + return bufio.NewWriter(io.Discard) }, } numBufPool = sync.Pool{ diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 84be0643ec..ac2482782c 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -142,9 +142,13 @@ func (p *TextParser) reset(in io.Reader) { func (p *TextParser) startOfLine() stateFn { p.lineCount++ if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil + // This is the only place that we expect to see io.EOF, + // which is not an error but the signal that we are done. + // Any other error that happens to align with the start of + // a line is still an error. + if p.err == io.EOF { + p.err = nil + } return nil } switch p.currentByte { diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go index 26e92288c7..a21b9d15dd 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -11,18 +11,18 @@ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. - Neither the name of the Open Knowledge Foundation Ltd. 
nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -35,8 +35,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - */ package goautoneg diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index c909b8aa8c..5727452c1e 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -18,7 +18,6 @@ import ( "errors" "fmt" "math" - "regexp" "strconv" "strings" "time" @@ -183,54 +182,78 @@ func (d *Duration) Type() string { return "duration" } -var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$") +func isdigit(c byte) bool { return c >= '0' && c <= '9' } + +// Units are required to go in order from biggest to smallest. +// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day. +var unitMap = map[string]struct { + pos int + mult uint64 +}{ + "ms": {7, uint64(time.Millisecond)}, + "s": {6, uint64(time.Second)}, + "m": {5, uint64(time.Minute)}, + "h": {4, uint64(time.Hour)}, + "d": {3, uint64(24 * time.Hour)}, + "w": {2, uint64(7 * 24 * time.Hour)}, + "y": {1, uint64(365 * 24 * time.Hour)}, +} // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. -func ParseDuration(durationStr string) (Duration, error) { - switch durationStr { +func ParseDuration(s string) (Duration, error) { + switch s { case "0": // Allow 0 without a unit. return 0, nil case "": return 0, errors.New("empty duration string") } - matches := durationRE.FindStringSubmatch(durationStr) - if matches == nil { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) - } - var dur time.Duration - // Parse the match at pos `pos` in the regex and use `mult` to turn that - // into ms, then add that value to the total parsed duration. - var overflowErr error - m := func(pos int, mult time.Duration) { - if matches[pos] == "" { - return + orig := s + var dur uint64 + lastUnitPos := 0 + + for s != "" { + if !isdigit(s[0]) { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + // Consume [0-9]* + i := 0 + for ; i < len(s) && isdigit(s[i]); i++ { + } + v, err := strconv.ParseUint(s[:i], 10, 0) + if err != nil { + return 0, fmt.Errorf("not a valid duration string: %q", orig) } - n, _ := strconv.Atoi(matches[pos]) + s = s[i:] + // Consume unit. + for i = 0; i < len(s) && !isdigit(s[i]); i++ { + } + if i == 0 { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig) + } + if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest. 
+ return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + lastUnitPos = unit.pos // Check if the provided duration overflows time.Duration (> ~ 290years). - if n > int((1<<63-1)/mult/time.Millisecond) { - overflowErr = errors.New("duration out of range") + if v > 1<<63/unit.mult { + return 0, errors.New("duration out of range") } - d := time.Duration(n) * time.Millisecond - dur += d * mult - - if dur < 0 { - overflowErr = errors.New("duration out of range") + dur += v * unit.mult + if dur > 1<<63-1 { + return 0, errors.New("duration out of range") } } - - m(2, 1000*60*60*24*365) // y - m(4, 1000*60*60*24*7) // w - m(6, 1000*60*60*24) // d - m(8, 1000*60*60) // h - m(10, 1000*60) // m - m(12, 1000) // s - m(14, 1) // ms - - return Duration(dur), overflowErr + return Duration(dur), nil } func (d Duration) String() string { diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index c9d8fb1a28..9eb440413f 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -16,20 +16,12 @@ package model import ( "encoding/json" "fmt" - "math" "sort" "strconv" "strings" ) var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - // ZeroSample is the pseudo zero-value of Sample used to signal a // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, // and metric nil. Note that the natural zero value of Sample has a timestamp @@ -38,82 +30,14 @@ var ( ZeroSample = Sample{Timestamp: Earliest} ) -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. +// Sample is a sample pair associated with a metric. A single sample must either +// define Value or Histogram but not both. Histogram == nil implies the Value +// field is used, otherwise it should be ignored. type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` + Histogram *SampleHistogram `json:"histogram"` } // Equal compares first the metrics, then the timestamp, then the value. The @@ -129,11 +53,19 @@ func (s *Sample) Equal(o *Sample) bool { if !s.Timestamp.Equal(o.Timestamp) { return false } - + if s.Histogram != nil { + return s.Histogram.Equal(o.Histogram) + } return s.Value.Equal(o.Value) } func (s Sample) String() string { + if s.Histogram != nil { + return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }) + } return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ Timestamp: s.Timestamp, Value: s.Value, @@ -142,6 +74,19 @@ func (s Sample) String() string { // MarshalJSON implements json.Marshaler. func (s Sample) MarshalJSON() ([]byte, error) { + if s.Histogram != nil { + v := struct { + Metric Metric `json:"metric"` + Histogram SampleHistogramPair `json:"histogram"` + }{ + Metric: s.Metric, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, + } + return json.Marshal(&v) + } v := struct { Metric Metric `json:"metric"` Value SamplePair `json:"value"` @@ -152,21 +97,25 @@ func (s Sample) MarshalJSON() ([]byte, error) { Value: s.Value, }, } - return json.Marshal(&v) } // UnmarshalJSON implements json.Unmarshaler. func (s *Sample) UnmarshalJSON(b []byte) error { v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + Histogram SampleHistogramPair `json:"histogram"` }{ Metric: s.Metric, Value: SamplePair{ Timestamp: s.Timestamp, Value: s.Value, }, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, } if err := json.Unmarshal(b, &v); err != nil { @@ -174,8 +123,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error { } s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value + if v.Histogram.Histogram != nil { + s.Timestamp = v.Histogram.Timestamp + s.Histogram = v.Histogram.Histogram + } else { + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + } return nil } @@ -221,80 +175,76 @@ func (s Samples) Equal(o Samples) bool { // SampleStream is a stream of Values belonging to an attached COWMetric. 
type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` } func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) + valuesLength := len(ss.Values) + vals := make([]string, valuesLength+len(ss.Histograms)) for i, v := range ss.Values { vals[i] = v.String() } + for i, v := range ss.Histograms { + vals[i+valuesLength] = v.String() + } return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) } -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string +func (ss SampleStream) MarshalJSON() ([]byte, error) { + if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else if len(ss.Histograms) > 0 { + v := struct { + Metric Metric `json:"metric"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + }{ + Metric: ss.Metric, + Values: ss.Values, + } + return json.Marshal(&v) + } } -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. -func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} +func (ss *SampleStream) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { + if err := json.Unmarshal(b, &v); err != nil { return err } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") + ss.Metric = v.Metric + ss.Values = v.Values + ss.Histograms = v.Histograms + + return nil } // Scalar is a scalar value evaluated at the set timestamp. 
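Reviewer note (not part of the patch): with the hunks above, Sample and SampleStream gain optional native-histogram fields, and the custom MarshalJSON methods keep the old wire format whenever no histograms are present. A small sketch of the resulting JSON shapes follows; the metric name and values are illustrative assumptions.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ss := model.SampleStream{
		Metric: model.Metric{"__name__": "http_requests_total"},
		Values: []model.SamplePair{{Timestamp: 1000, Value: 42}},
	}
	b, _ := json.Marshal(ss)
	// Float-only streams keep the old shape; no "histograms" key is emitted:
	// {"metric":{"__name__":"http_requests_total"},"values":[[1,"42"]]}
	fmt.Println(string(b))

	ss.Histograms = []model.SampleHistogramPair{{
		Timestamp: 2000,
		Histogram: &model.SampleHistogram{Count: 1, Sum: 3.5},
	}}
	b, _ = json.Marshal(ss)
	// With histogram samples present, both "values" and "histograms" appear.
	fmt.Println(string(b))
}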
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go new file mode 100644 index 0000000000..0f615a7053 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -0,0 +1,100 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "strconv" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. 
+func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go new file mode 100644 index 0000000000..54bb038cff --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -0,0 +1,178 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type FloatString float64 + +func (v FloatString) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +func (v FloatString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func (v *FloatString) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("float value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = FloatString(f) + return nil +} + +type HistogramBucket struct { + Boundaries int32 + Lower FloatString + Upper FloatString + Count FloatString +} + +func (s HistogramBucket) MarshalJSON() ([]byte, error) { + b, err := json.Marshal(s.Boundaries) + if err != nil { + return nil, err + } + l, err := json.Marshal(s.Lower) + if err != nil { + return nil, err + } + u, err := json.Marshal(s.Upper) + if err != nil { + return nil, err + } + c, err := json.Marshal(s.Count) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil +} + +func (s *HistogramBucket) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + return nil +} + +func (s *HistogramBucket) Equal(o *HistogramBucket) bool { + return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) +} + +func (b HistogramBucket) String() string { + var sb strings.Builder + lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 + upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + if lowerInclusive { + sb.WriteRune('[') + } else { + sb.WriteRune('(') + } + fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + if upperInclusive { + sb.WriteRune(']') + } else { + sb.WriteRune(')') + } + fmt.Fprintf(&sb, ":%v", b.Count) + return sb.String() +} + +type HistogramBuckets []*HistogramBucket + +func (s HistogramBuckets) Equal(o HistogramBuckets) bool { + if len(s) != len(o) { + return false + } + + for i, bucket := range s { + if !bucket.Equal(o[i]) { + return false + } + } + return true +} + +type SampleHistogram struct { + Count 
FloatString `json:"count"` + Sum FloatString `json:"sum"` + Buckets HistogramBuckets `json:"buckets"` +} + +func (s SampleHistogram) String() string { + return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets) +} + +func (s *SampleHistogram) Equal(o *SampleHistogram) bool { + return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets)) +} + +type SampleHistogramPair struct { + Timestamp Time + // Histogram should never be nil, it's only stored as pointer for efficiency. + Histogram *SampleHistogram +} + +func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { + if s.Histogram == nil { + return nil, fmt.Errorf("histogram is nil") + } + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Histogram) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Timestamp, &s.Histogram} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + if s.Histogram == nil { + return fmt.Errorf("histogram is null") + } + return nil +} + +func (s SampleHistogramPair) String() string { + return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp) +} + +func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool { + return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp)) +} diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go new file mode 100644 index 0000000000..726c50ee63 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -0,0 +1,83 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" +) + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. 
+func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 6c8e3e2197..e358db69c5 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -55,19 +55,22 @@ ifneq ($(shell which gotestsum),) endif endif -PROMU_VERSION ?= 0.13.0 +PROMU_VERSION ?= 0.14.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz +SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.45.2 +GOLANGCI_LINT_VERSION ?= v1.49.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) # If we're in CI and there is an Actions file, that means the linter # is being run in Actions, so we don't need to run it here. 
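The new model value types above marshal to compact JSON: FloatString as a quoted decimal string, HistogramBucket and SampleHistogramPair as fixed-length arrays, and ValueType as its lower-case name. A minimal decoding sketch, using only the github.com/prometheus/common/model identifiers introduced in this hunk (the literal JSON values are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// A SampleHistogramPair is a two-element array: [timestamp, histogram].
	// Every numeric field inside the histogram is a FloatString, i.e. a quoted decimal.
	raw := []byte(`[1700000000.123,{"count":"5","sum":"12.5","buckets":[[0,"0","1","2"],[0,"1","2","3"]]}]`)

	var pair model.SampleHistogramPair
	if err := json.Unmarshal(raw, &pair); err != nil {
		panic(err)
	}
	fmt.Println(pair.Timestamp, pair.Histogram.Count, len(pair.Histogram.Buckets))

	// ValueType round-trips through its string form.
	b, err := json.Marshal(model.ValMatrix)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "matrix"
}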
- ifeq (,$(CIRCLE_JOB)) + ifneq (,$(SKIP_GOLANGCI_LINT)) + GOLANGCI_LINT := + else ifeq (,$(CIRCLE_JOB)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index ff6b927da1..06968ca2ed 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -380,6 +380,42 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { return cpuinfo, nil } +func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + i := 0 + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "CPU Family": + cpuinfo[i].CPUFamily = field[1] + case "Model Name": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(info)) diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go new file mode 100644 index 0000000000..d88442f0ed --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoLoong diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go index ea41bf2ca1..a6b2b3127c 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
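For reference, parseCPUInfoLoong keys off the literal "system type", "processor", "CPU Family" and "Model Name" fields of a LoongArch /proc/cpuinfo. A rough in-package sketch of how it maps such input; the cpuinfo excerpt is illustrative, not captured from real hardware:

package procfs

import "fmt"

func loongCPUInfoSketch() {
	cpuinfo := []byte(`system type   : generic-loongson-machine

processor     : 0
package       : 0
CPU Family    : Loongson-64bit
Model Name    : Loongson-3A5000

processor     : 1
package       : 0
CPU Family    : Loongson-64bit
Model Name    : Loongson-3A5000
`)
	cpus, err := parseCPUInfoLoong(cpuinfo)
	if err != nil {
		fmt.Println(err)
		return
	}
	// "system type" is copied into VendorID on every entry; "processor" indexes the slice.
	fmt.Println(len(cpus), cpus[0].VendorID, cpus[1].ModelName) // 2 generic-loongson-machine Loongson-3A5000
}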
-//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x -// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x +//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x +// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x package procfs diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go index d31a82600f..f9d961e441 100644 --- a/vendor/github.com/prometheus/procfs/doc.go +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -16,30 +16,29 @@ // // Example: // -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.Stat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.Stat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } package procfs diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index f7a828bb1d..0c482c18cc 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -284,7 +284,8 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { } // parseMount parses an entry in /proc/[pid]/mountstats in the format: -// device [device] mounted on [mount] with fstype [type] +// +// device [device] mounted on [mount] with fstype [type] func parseMount(ss []string) (*Mount, error) { if len(ss) < deviceEntryLen { return nil, fmt.Errorf("invalid device entry: %v", ss) diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index a94f86dc4a..06b7b8f216 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -27,8 +27,9 @@ import ( // For the proc file format details, // See: // * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 -// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 -// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. +// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 +// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 +// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 // SoftnetStat contains a single row of data from /proc/net/softnet_stat. 
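The SoftnetStat struct below gains one field group per kernel generation, and the new Width field records how many columns the running kernel actually emitted, so callers can tell a genuine zero from a column that was never present. A hedged usage sketch, assuming the package's existing NewDefaultFS and NetSoftnetStat entry points (not shown in this hunk):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	stats, err := fs.NetSoftnetStat()
	if err != nil {
		panic(err)
	}
	for _, s := range stats {
		fmt.Printf("width=%d processed=%d dropped=%d", s.Width, s.Processed, s.Dropped)
		// Backlog length and owning CPU only exist on kernels that emit 13+ columns.
		if s.Width >= 13 {
			fmt.Printf(" backlog=%d cpu=%d", s.SoftnetBacklogLen, s.Index)
		}
		fmt.Println()
	}
}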
type SoftnetStat struct { @@ -38,6 +39,18 @@ type SoftnetStat struct { Dropped uint32 // Number of times processing packets ran out of quota. TimeSqueezed uint32 + // Number of collision occur while obtaining device lock while transmitting. + CPUCollision uint32 + // Number of times cpu woken up received_rps. + ReceivedRps uint32 + // number of times flow limit has been reached. + FlowLimitCount uint32 + // Softnet backlog status. + SoftnetBacklogLen uint32 + // CPU id owning this softnet_data. + Index uint32 + // softnet_data's Width. + Width int } var softNetProcFile = "net/softnet_stat" @@ -66,22 +79,57 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { for s.Scan() { columns := strings.Fields(s.Text()) width := len(columns) + softnetStat := SoftnetStat{} if width < minColumns { return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) } - // We only parse the first three columns at the moment. - us, err := parseHexUint32s(columns[0:3]) - if err != nil { - return nil, err + // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 + if width >= minColumns { + us, err := parseHexUint32s(columns[0:9]) + if err != nil { + return nil, err + } + + softnetStat.Processed = us[0] + softnetStat.Dropped = us[1] + softnetStat.TimeSqueezed = us[2] + softnetStat.CPUCollision = us[8] + } + + // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 + if width >= 10 { + us, err := parseHexUint32s(columns[9:10]) + if err != nil { + return nil, err + } + + softnetStat.ReceivedRps = us[0] } - stats = append(stats, SoftnetStat{ - Processed: us[0], - Dropped: us[1], - TimeSqueezed: us[2], - }) + // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 + if width >= 11 { + us, err := parseHexUint32s(columns[10:11]) + if err != nil { + return nil, err + } + + softnetStat.FlowLimitCount = us[0] + } + + // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 + if width >= 13 { + us, err := parseHexUint32s(columns[11:13]) + if err != nil { + return nil, err + } + + softnetStat.SoftnetBacklogLen = us[0] + softnetStat.Index = us[1] + } + softnetStat.Width = width + stats = append(stats, softnetStat) } return stats, nil diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index dcea9c5a67..5cc40aef55 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -15,6 +15,7 @@ package procfs import ( "bufio" + "io" "os" "path/filepath" "strconv" @@ -42,27 +43,43 @@ func (fs FS) NetStat() ([]NetStat, error) { return nil, err } - netStatFile := NetStat{ - Filename: filepath.Base(filePath), - Stats: make(map[string][]uint64), + procNetstat, err := parseNetstat(file) + if err != nil { + return nil, err + } + procNetstat.Filename = filepath.Base(filePath) + + netStatsTotal = append(netStatsTotal, procNetstat) + } + return netStatsTotal, nil +} + +// parseNetstat parses the metrics from `/proc/net/stat/` file +// and returns a NetStat structure. +func parseNetstat(r io.Reader) (NetStat, error) { + var ( + scanner = bufio.NewScanner(r) + netStat = NetStat{ + Stats: make(map[string][]uint64), } - scanner := bufio.NewScanner(file) - scanner.Scan() - // First string is always a header for stats - var headers []string - headers = append(headers, strings.Fields(scanner.Text())...) 
+ ) + + scanner.Scan() - // Other strings represent per-CPU counters - for scanner.Scan() { - for num, counter := range strings.Fields(scanner.Text()) { - value, err := strconv.ParseUint(counter, 16, 64) - if err != nil { - return nil, err - } - netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value) + // First string is always a header for stats + var headers []string + headers = append(headers, strings.Fields(scanner.Text())...) + + // Other strings represent per-CPU counters + for scanner.Scan() { + for num, counter := range strings.Fields(scanner.Text()) { + value, err := strconv.ParseUint(counter, 16, 64) + if err != nil { + return NetStat{}, err } + netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value) } - netStatsTotal = append(netStatsTotal, netStatFile) } - return netStatsTotal, nil + + return netStat, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index cca03327c3..ea83a75ffc 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a +// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a // specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go new file mode 100644 index 0000000000..9df79c2379 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -0,0 +1,98 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Interrupt represents a single interrupt line. +type Interrupt struct { + // Info is the type of interrupt. + Info string + // Devices is the name of the device that is located at that IRQ + Devices string + // Values is the number of interrupts per CPU. + Values []string +} + +// Interrupts models the content of /proc/interrupts. Key is the IRQ number. +// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts +// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output +type Interrupts map[string]Interrupt + +// Interrupts creates a new instance from a given Proc instance. 
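A short usage sketch for the accessor defined next; procfs.Self and the Interrupt/Interrupts types are the ones added in this patch, and the printed layout is illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	ints, err := p.Interrupts()
	if err != nil {
		panic(err)
	}
	// The map key is the IRQ number (or a symbolic name such as "NMI");
	// Values holds one counter per CPU, kept as strings verbatim from the file.
	for irq, in := range ints {
		fmt.Printf("%s (%s, %s): %v\n", irq, in.Info, in.Devices, in.Values)
	}
}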
+func (p Proc) Interrupts() (Interrupts, error) { + data, err := util.ReadFileNoStat(p.path("interrupts")) + if err != nil { + return nil, err + } + return parseInterrupts(bytes.NewReader(data)) +} + +func parseInterrupts(r io.Reader) (Interrupts, error) { + var ( + interrupts = Interrupts{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return nil, errors.New("interrupts empty") + } + cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + if len(parts) == 0 { // skip empty lines + continue + } + if len(parts) < 2 { + return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) + } + intName := parts[0][:len(parts[0])-1] // remove trailing : + + if len(parts) == 2 { + interrupts[intName] = Interrupt{ + Info: "", + Devices: "", + Values: []string{ + parts[1], + }, + } + continue + } + + intr := Interrupt{ + Values: parts[1 : cpuNum+1], + } + + if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt + intr.Info = parts[cpuNum+1] + intr.Devices = strings.Join(parts[cpuNum+2:], " ") + } else { + intr.Info = strings.Join(parts[cpuNum+1:], " ") + } + interrupts[intName] = intr + } + + return interrupts, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 48b5238194..6a43bb2459 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -33,139 +33,140 @@ type ProcNetstat struct { } type TcpExt struct { // nolint:revive - SyncookiesSent float64 - SyncookiesRecv float64 - SyncookiesFailed float64 - EmbryonicRsts float64 - PruneCalled float64 - RcvPruned float64 - OfoPruned float64 - OutOfWindowIcmps float64 - LockDroppedIcmps float64 - ArpFilter float64 - TW float64 - TWRecycled float64 - TWKilled float64 - PAWSActive float64 - PAWSEstab float64 - DelayedACKs float64 - DelayedACKLocked float64 - DelayedACKLost float64 - ListenOverflows float64 - ListenDrops float64 - TCPHPHits float64 - TCPPureAcks float64 - TCPHPAcks float64 - TCPRenoRecovery float64 - TCPSackRecovery float64 - TCPSACKReneging float64 - TCPSACKReorder float64 - TCPRenoReorder float64 - TCPTSReorder float64 - TCPFullUndo float64 - TCPPartialUndo float64 - TCPDSACKUndo float64 - TCPLossUndo float64 - TCPLostRetransmit float64 - TCPRenoFailures float64 - TCPSackFailures float64 - TCPLossFailures float64 - TCPFastRetrans float64 - TCPSlowStartRetrans float64 - TCPTimeouts float64 - TCPLossProbes float64 - TCPLossProbeRecovery float64 - TCPRenoRecoveryFail float64 - TCPSackRecoveryFail float64 - TCPRcvCollapsed float64 - TCPDSACKOldSent float64 - TCPDSACKOfoSent float64 - TCPDSACKRecv float64 - TCPDSACKOfoRecv float64 - TCPAbortOnData float64 - TCPAbortOnClose float64 - TCPAbortOnMemory float64 - TCPAbortOnTimeout float64 - TCPAbortOnLinger float64 - TCPAbortFailed float64 - TCPMemoryPressures float64 - TCPMemoryPressuresChrono float64 - TCPSACKDiscard float64 - TCPDSACKIgnoredOld float64 - TCPDSACKIgnoredNoUndo float64 - TCPSpuriousRTOs float64 - TCPMD5NotFound float64 - TCPMD5Unexpected float64 - TCPMD5Failure float64 - TCPSackShifted float64 - TCPSackMerged float64 - TCPSackShiftFallback float64 - TCPBacklogDrop float64 - PFMemallocDrop float64 - TCPMinTTLDrop float64 - TCPDeferAcceptDrop float64 - IPReversePathFilter float64 - TCPTimeWaitOverflow float64 - TCPReqQFullDoCookies float64 - TCPReqQFullDrop 
float64 - TCPRetransFail float64 - TCPRcvCoalesce float64 - TCPOFOQueue float64 - TCPOFODrop float64 - TCPOFOMerge float64 - TCPChallengeACK float64 - TCPSYNChallenge float64 - TCPFastOpenActive float64 - TCPFastOpenActiveFail float64 - TCPFastOpenPassive float64 - TCPFastOpenPassiveFail float64 - TCPFastOpenListenOverflow float64 - TCPFastOpenCookieReqd float64 - TCPFastOpenBlackhole float64 - TCPSpuriousRtxHostQueues float64 - BusyPollRxPackets float64 - TCPAutoCorking float64 - TCPFromZeroWindowAdv float64 - TCPToZeroWindowAdv float64 - TCPWantZeroWindowAdv float64 - TCPSynRetrans float64 - TCPOrigDataSent float64 - TCPHystartTrainDetect float64 - TCPHystartTrainCwnd float64 - TCPHystartDelayDetect float64 - TCPHystartDelayCwnd float64 - TCPACKSkippedSynRecv float64 - TCPACKSkippedPAWS float64 - TCPACKSkippedSeq float64 - TCPACKSkippedFinWait2 float64 - TCPACKSkippedTimeWait float64 - TCPACKSkippedChallenge float64 - TCPWinProbe float64 - TCPKeepAlive float64 - TCPMTUPFail float64 - TCPMTUPSuccess float64 - TCPWqueueTooBig float64 + SyncookiesSent *float64 + SyncookiesRecv *float64 + SyncookiesFailed *float64 + EmbryonicRsts *float64 + PruneCalled *float64 + RcvPruned *float64 + OfoPruned *float64 + OutOfWindowIcmps *float64 + LockDroppedIcmps *float64 + ArpFilter *float64 + TW *float64 + TWRecycled *float64 + TWKilled *float64 + PAWSActive *float64 + PAWSEstab *float64 + DelayedACKs *float64 + DelayedACKLocked *float64 + DelayedACKLost *float64 + ListenOverflows *float64 + ListenDrops *float64 + TCPHPHits *float64 + TCPPureAcks *float64 + TCPHPAcks *float64 + TCPRenoRecovery *float64 + TCPSackRecovery *float64 + TCPSACKReneging *float64 + TCPSACKReorder *float64 + TCPRenoReorder *float64 + TCPTSReorder *float64 + TCPFullUndo *float64 + TCPPartialUndo *float64 + TCPDSACKUndo *float64 + TCPLossUndo *float64 + TCPLostRetransmit *float64 + TCPRenoFailures *float64 + TCPSackFailures *float64 + TCPLossFailures *float64 + TCPFastRetrans *float64 + TCPSlowStartRetrans *float64 + TCPTimeouts *float64 + TCPLossProbes *float64 + TCPLossProbeRecovery *float64 + TCPRenoRecoveryFail *float64 + TCPSackRecoveryFail *float64 + TCPRcvCollapsed *float64 + TCPDSACKOldSent *float64 + TCPDSACKOfoSent *float64 + TCPDSACKRecv *float64 + TCPDSACKOfoRecv *float64 + TCPAbortOnData *float64 + TCPAbortOnClose *float64 + TCPAbortOnMemory *float64 + TCPAbortOnTimeout *float64 + TCPAbortOnLinger *float64 + TCPAbortFailed *float64 + TCPMemoryPressures *float64 + TCPMemoryPressuresChrono *float64 + TCPSACKDiscard *float64 + TCPDSACKIgnoredOld *float64 + TCPDSACKIgnoredNoUndo *float64 + TCPSpuriousRTOs *float64 + TCPMD5NotFound *float64 + TCPMD5Unexpected *float64 + TCPMD5Failure *float64 + TCPSackShifted *float64 + TCPSackMerged *float64 + TCPSackShiftFallback *float64 + TCPBacklogDrop *float64 + PFMemallocDrop *float64 + TCPMinTTLDrop *float64 + TCPDeferAcceptDrop *float64 + IPReversePathFilter *float64 + TCPTimeWaitOverflow *float64 + TCPReqQFullDoCookies *float64 + TCPReqQFullDrop *float64 + TCPRetransFail *float64 + TCPRcvCoalesce *float64 + TCPRcvQDrop *float64 + TCPOFOQueue *float64 + TCPOFODrop *float64 + TCPOFOMerge *float64 + TCPChallengeACK *float64 + TCPSYNChallenge *float64 + TCPFastOpenActive *float64 + TCPFastOpenActiveFail *float64 + TCPFastOpenPassive *float64 + TCPFastOpenPassiveFail *float64 + TCPFastOpenListenOverflow *float64 + TCPFastOpenCookieReqd *float64 + TCPFastOpenBlackhole *float64 + TCPSpuriousRtxHostQueues *float64 + BusyPollRxPackets *float64 + TCPAutoCorking *float64 + 
TCPFromZeroWindowAdv *float64 + TCPToZeroWindowAdv *float64 + TCPWantZeroWindowAdv *float64 + TCPSynRetrans *float64 + TCPOrigDataSent *float64 + TCPHystartTrainDetect *float64 + TCPHystartTrainCwnd *float64 + TCPHystartDelayDetect *float64 + TCPHystartDelayCwnd *float64 + TCPACKSkippedSynRecv *float64 + TCPACKSkippedPAWS *float64 + TCPACKSkippedSeq *float64 + TCPACKSkippedFinWait2 *float64 + TCPACKSkippedTimeWait *float64 + TCPACKSkippedChallenge *float64 + TCPWinProbe *float64 + TCPKeepAlive *float64 + TCPMTUPFail *float64 + TCPMTUPSuccess *float64 + TCPWqueueTooBig *float64 } type IpExt struct { // nolint:revive - InNoRoutes float64 - InTruncatedPkts float64 - InMcastPkts float64 - OutMcastPkts float64 - InBcastPkts float64 - OutBcastPkts float64 - InOctets float64 - OutOctets float64 - InMcastOctets float64 - OutMcastOctets float64 - InBcastOctets float64 - OutBcastOctets float64 - InCsumErrors float64 - InNoECTPkts float64 - InECT1Pkts float64 - InECT0Pkts float64 - InCEPkts float64 - ReasmOverlaps float64 + InNoRoutes *float64 + InTruncatedPkts *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InBcastPkts *float64 + OutBcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InCsumErrors *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 + ReasmOverlaps *float64 } func (p Proc) Netstat() (ProcNetstat, error) { @@ -174,14 +175,14 @@ func (p Proc) Netstat() (ProcNetstat, error) { if err != nil { return ProcNetstat{PID: p.PID}, err } - procNetstat, err := parseNetstat(bytes.NewReader(data), filename) + procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename) procNetstat.PID = p.PID return procNetstat, err } -// parseNetstat parses the metrics from proc//net/netstat file +// parseProcNetstat parses the metrics from proc//net/netstat file // and returns a ProcNetstat structure. 
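Because every TcpExt and IpExt counter is now a *float64, consumers must treat a key missing from this kernel's /proc/<pid>/net/netstat as nil rather than zero. A minimal caller sketch using Proc.Netstat from this file (procfs.Self comes from the package's existing API):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	stat, err := p.Netstat()
	if err != nil {
		panic(err)
	}
	// Dereference only after a nil check; a nil pointer means the counter
	// was not reported by this kernel, not that its value is zero.
	if v := stat.TcpExt.TCPSynRetrans; v != nil {
		fmt.Printf("TCPSynRetrans: %.0f\n", *v)
	}
	if v := stat.IpExt.InOctets; v != nil {
		fmt.Printf("IpExt.InOctets: %.0f\n", *v)
	}
}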
-func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { +func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { var ( scanner = bufio.NewScanner(r) procNetstat = ProcNetstat{} @@ -208,230 +209,232 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { case "TcpExt": switch key { case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = value + procNetstat.TcpExt.SyncookiesSent = &value case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = value + procNetstat.TcpExt.SyncookiesRecv = &value case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = value + procNetstat.TcpExt.SyncookiesFailed = &value case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = value + procNetstat.TcpExt.EmbryonicRsts = &value case "PruneCalled": - procNetstat.TcpExt.PruneCalled = value + procNetstat.TcpExt.PruneCalled = &value case "RcvPruned": - procNetstat.TcpExt.RcvPruned = value + procNetstat.TcpExt.RcvPruned = &value case "OfoPruned": - procNetstat.TcpExt.OfoPruned = value + procNetstat.TcpExt.OfoPruned = &value case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = value + procNetstat.TcpExt.OutOfWindowIcmps = &value case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = value + procNetstat.TcpExt.LockDroppedIcmps = &value case "ArpFilter": - procNetstat.TcpExt.ArpFilter = value + procNetstat.TcpExt.ArpFilter = &value case "TW": - procNetstat.TcpExt.TW = value + procNetstat.TcpExt.TW = &value case "TWRecycled": - procNetstat.TcpExt.TWRecycled = value + procNetstat.TcpExt.TWRecycled = &value case "TWKilled": - procNetstat.TcpExt.TWKilled = value + procNetstat.TcpExt.TWKilled = &value case "PAWSActive": - procNetstat.TcpExt.PAWSActive = value + procNetstat.TcpExt.PAWSActive = &value case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = value + procNetstat.TcpExt.PAWSEstab = &value case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = value + procNetstat.TcpExt.DelayedACKs = &value case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = value + procNetstat.TcpExt.DelayedACKLocked = &value case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = value + procNetstat.TcpExt.DelayedACKLost = &value case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = value + procNetstat.TcpExt.ListenOverflows = &value case "ListenDrops": - procNetstat.TcpExt.ListenDrops = value + procNetstat.TcpExt.ListenDrops = &value case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = value + procNetstat.TcpExt.TCPHPHits = &value case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = value + procNetstat.TcpExt.TCPPureAcks = &value case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = value + procNetstat.TcpExt.TCPHPAcks = &value case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = value + procNetstat.TcpExt.TCPRenoRecovery = &value case "TCPSackRecovery": - procNetstat.TcpExt.TCPSackRecovery = value + procNetstat.TcpExt.TCPSackRecovery = &value case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = value + procNetstat.TcpExt.TCPSACKReneging = &value case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = value + procNetstat.TcpExt.TCPSACKReorder = &value case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = value + procNetstat.TcpExt.TCPRenoReorder = &value case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = value + procNetstat.TcpExt.TCPTSReorder = &value case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = value + procNetstat.TcpExt.TCPFullUndo = &value case "TCPPartialUndo": - 
procNetstat.TcpExt.TCPPartialUndo = value + procNetstat.TcpExt.TCPPartialUndo = &value case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = value + procNetstat.TcpExt.TCPDSACKUndo = &value case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = value + procNetstat.TcpExt.TCPLossUndo = &value case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = value + procNetstat.TcpExt.TCPLostRetransmit = &value case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = value + procNetstat.TcpExt.TCPRenoFailures = &value case "TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = value + procNetstat.TcpExt.TCPSackFailures = &value case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = value + procNetstat.TcpExt.TCPLossFailures = &value case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = value + procNetstat.TcpExt.TCPFastRetrans = &value case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = value + procNetstat.TcpExt.TCPSlowStartRetrans = &value case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = value + procNetstat.TcpExt.TCPTimeouts = &value case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = value + procNetstat.TcpExt.TCPLossProbes = &value case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = value + procNetstat.TcpExt.TCPLossProbeRecovery = &value case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = value + procNetstat.TcpExt.TCPRenoRecoveryFail = &value case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = value + procNetstat.TcpExt.TCPSackRecoveryFail = &value case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = value + procNetstat.TcpExt.TCPRcvCollapsed = &value case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = value + procNetstat.TcpExt.TCPDSACKOldSent = &value case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = value + procNetstat.TcpExt.TCPDSACKOfoSent = &value case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = value + procNetstat.TcpExt.TCPDSACKRecv = &value case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = value + procNetstat.TcpExt.TCPDSACKOfoRecv = &value case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = value + procNetstat.TcpExt.TCPAbortOnData = &value case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = value + procNetstat.TcpExt.TCPAbortOnClose = &value case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = value + procNetstat.TcpExt.TCPDeferAcceptDrop = &value case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = value + procNetstat.TcpExt.IPReversePathFilter = &value case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = value + procNetstat.TcpExt.TCPTimeWaitOverflow = &value case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = value + procNetstat.TcpExt.TCPReqQFullDoCookies = &value case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = value + procNetstat.TcpExt.TCPReqQFullDrop = &value case "TCPRetransFail": - procNetstat.TcpExt.TCPRetransFail = value + procNetstat.TcpExt.TCPRetransFail = &value case "TCPRcvCoalesce": - procNetstat.TcpExt.TCPRcvCoalesce = value + procNetstat.TcpExt.TCPRcvCoalesce = &value + case "TCPRcvQDrop": + procNetstat.TcpExt.TCPRcvQDrop = &value case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = value + procNetstat.TcpExt.TCPOFOQueue = &value case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = value + procNetstat.TcpExt.TCPOFODrop = &value case "TCPOFOMerge": - 
procNetstat.TcpExt.TCPOFOMerge = value + procNetstat.TcpExt.TCPOFOMerge = &value case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = value + procNetstat.TcpExt.TCPChallengeACK = &value case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = value + procNetstat.TcpExt.TCPSYNChallenge = &value case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = value + procNetstat.TcpExt.TCPFastOpenActive = &value case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = value + procNetstat.TcpExt.TCPFastOpenActiveFail = &value case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = value + procNetstat.TcpExt.TCPFastOpenPassive = &value case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = value + procNetstat.TcpExt.TCPFastOpenPassiveFail = &value case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = value + procNetstat.TcpExt.TCPFastOpenListenOverflow = &value case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = value + procNetstat.TcpExt.TCPFastOpenCookieReqd = &value case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = value + procNetstat.TcpExt.TCPFastOpenBlackhole = &value case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value + procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = value + procNetstat.TcpExt.BusyPollRxPackets = &value case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = value + procNetstat.TcpExt.TCPAutoCorking = &value case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = value + procNetstat.TcpExt.TCPFromZeroWindowAdv = &value case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = value + procNetstat.TcpExt.TCPToZeroWindowAdv = &value case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = value + procNetstat.TcpExt.TCPWantZeroWindowAdv = &value case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = value + procNetstat.TcpExt.TCPSynRetrans = &value case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = value + procNetstat.TcpExt.TCPOrigDataSent = &value case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = value + procNetstat.TcpExt.TCPHystartTrainDetect = &value case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = value + procNetstat.TcpExt.TCPHystartTrainCwnd = &value case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = value + procNetstat.TcpExt.TCPHystartDelayDetect = &value case "TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = value + procNetstat.TcpExt.TCPHystartDelayCwnd = &value case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = value + procNetstat.TcpExt.TCPACKSkippedSynRecv = &value case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = value + procNetstat.TcpExt.TCPACKSkippedPAWS = &value case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = value + procNetstat.TcpExt.TCPACKSkippedSeq = &value case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = value + procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value case "TCPACKSkippedTimeWait": - procNetstat.TcpExt.TCPACKSkippedTimeWait = value + procNetstat.TcpExt.TCPACKSkippedTimeWait = &value case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = value + procNetstat.TcpExt.TCPACKSkippedChallenge = &value case "TCPWinProbe": - 
procNetstat.TcpExt.TCPWinProbe = value + procNetstat.TcpExt.TCPWinProbe = &value case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = value + procNetstat.TcpExt.TCPKeepAlive = &value case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = value + procNetstat.TcpExt.TCPMTUPFail = &value case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = value + procNetstat.TcpExt.TCPMTUPSuccess = &value case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = value + procNetstat.TcpExt.TCPWqueueTooBig = &value } case "IpExt": switch key { case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = value + procNetstat.IpExt.InNoRoutes = &value case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = value + procNetstat.IpExt.InTruncatedPkts = &value case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = value + procNetstat.IpExt.InMcastPkts = &value case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = value + procNetstat.IpExt.OutMcastPkts = &value case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = value + procNetstat.IpExt.InBcastPkts = &value case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = value + procNetstat.IpExt.OutBcastPkts = &value case "InOctets": - procNetstat.IpExt.InOctets = value + procNetstat.IpExt.InOctets = &value case "OutOctets": - procNetstat.IpExt.OutOctets = value + procNetstat.IpExt.OutOctets = &value case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = value + procNetstat.IpExt.InMcastOctets = &value case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = value + procNetstat.IpExt.OutMcastOctets = &value case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = value + procNetstat.IpExt.InBcastOctets = &value case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = value + procNetstat.IpExt.OutBcastOctets = &value case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = value + procNetstat.IpExt.InCsumErrors = &value case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = value + procNetstat.IpExt.InNoECTPkts = &value case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = value + procNetstat.IpExt.InECT1Pkts = &value case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = value + procNetstat.IpExt.InECT0Pkts = &value case "InCEPkts": - procNetstat.IpExt.InCEPkts = value + procNetstat.IpExt.InCEPkts = &value case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = value + procNetstat.IpExt.ReasmOverlaps = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index ae191896cb..6c46b71884 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -37,100 +37,100 @@ type ProcSnmp struct { } type Ip struct { // nolint:revive - Forwarding float64 - DefaultTTL float64 - InReceives float64 - InHdrErrors float64 - InAddrErrors float64 - ForwDatagrams float64 - InUnknownProtos float64 - InDiscards float64 - InDelivers float64 - OutRequests float64 - OutDiscards float64 - OutNoRoutes float64 - ReasmTimeout float64 - ReasmReqds float64 - ReasmOKs float64 - ReasmFails float64 - FragOKs float64 - FragFails float64 - FragCreates float64 + Forwarding *float64 + DefaultTTL *float64 + InReceives *float64 + InHdrErrors *float64 + InAddrErrors *float64 + ForwDatagrams *float64 + InUnknownProtos *float64 + InDiscards *float64 + InDelivers *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + 
FragCreates *float64 } -type Icmp struct { - InMsgs float64 - InErrors float64 - InCsumErrors float64 - InDestUnreachs float64 - InTimeExcds float64 - InParmProbs float64 - InSrcQuenchs float64 - InRedirects float64 - InEchos float64 - InEchoReps float64 - InTimestamps float64 - InTimestampReps float64 - InAddrMasks float64 - InAddrMaskReps float64 - OutMsgs float64 - OutErrors float64 - OutDestUnreachs float64 - OutTimeExcds float64 - OutParmProbs float64 - OutSrcQuenchs float64 - OutRedirects float64 - OutEchos float64 - OutEchoReps float64 - OutTimestamps float64 - OutTimestampReps float64 - OutAddrMasks float64 - OutAddrMaskReps float64 +type Icmp struct { // nolint:revive + InMsgs *float64 + InErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InTimeExcds *float64 + InParmProbs *float64 + InSrcQuenchs *float64 + InRedirects *float64 + InEchos *float64 + InEchoReps *float64 + InTimestamps *float64 + InTimestampReps *float64 + InAddrMasks *float64 + InAddrMaskReps *float64 + OutMsgs *float64 + OutErrors *float64 + OutDestUnreachs *float64 + OutTimeExcds *float64 + OutParmProbs *float64 + OutSrcQuenchs *float64 + OutRedirects *float64 + OutEchos *float64 + OutEchoReps *float64 + OutTimestamps *float64 + OutTimestampReps *float64 + OutAddrMasks *float64 + OutAddrMaskReps *float64 } type IcmpMsg struct { - InType3 float64 - OutType3 float64 + InType3 *float64 + OutType3 *float64 } type Tcp struct { // nolint:revive - RtoAlgorithm float64 - RtoMin float64 - RtoMax float64 - MaxConn float64 - ActiveOpens float64 - PassiveOpens float64 - AttemptFails float64 - EstabResets float64 - CurrEstab float64 - InSegs float64 - OutSegs float64 - RetransSegs float64 - InErrs float64 - OutRsts float64 - InCsumErrors float64 + RtoAlgorithm *float64 + RtoMin *float64 + RtoMax *float64 + MaxConn *float64 + ActiveOpens *float64 + PassiveOpens *float64 + AttemptFails *float64 + EstabResets *float64 + CurrEstab *float64 + InSegs *float64 + OutSegs *float64 + RetransSegs *float64 + InErrs *float64 + OutRsts *float64 + InCsumErrors *float64 } type Udp struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } type UdpLite struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } func (p Proc) Snmp() (ProcSnmp, error) { @@ -173,178 +173,178 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { case "Ip": switch key { case "Forwarding": - procSnmp.Ip.Forwarding = value + procSnmp.Ip.Forwarding = &value case "DefaultTTL": - procSnmp.Ip.DefaultTTL = value + procSnmp.Ip.DefaultTTL = &value case "InReceives": - procSnmp.Ip.InReceives = value + procSnmp.Ip.InReceives = &value case "InHdrErrors": - procSnmp.Ip.InHdrErrors = value + procSnmp.Ip.InHdrErrors = &value case "InAddrErrors": - procSnmp.Ip.InAddrErrors = value + procSnmp.Ip.InAddrErrors = &value case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = value + procSnmp.Ip.ForwDatagrams = &value 
case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = value + procSnmp.Ip.InUnknownProtos = &value case "InDiscards": - procSnmp.Ip.InDiscards = value + procSnmp.Ip.InDiscards = &value case "InDelivers": - procSnmp.Ip.InDelivers = value + procSnmp.Ip.InDelivers = &value case "OutRequests": - procSnmp.Ip.OutRequests = value + procSnmp.Ip.OutRequests = &value case "OutDiscards": - procSnmp.Ip.OutDiscards = value + procSnmp.Ip.OutDiscards = &value case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = value + procSnmp.Ip.OutNoRoutes = &value case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = value + procSnmp.Ip.ReasmTimeout = &value case "ReasmReqds": - procSnmp.Ip.ReasmReqds = value + procSnmp.Ip.ReasmReqds = &value case "ReasmOKs": - procSnmp.Ip.ReasmOKs = value + procSnmp.Ip.ReasmOKs = &value case "ReasmFails": - procSnmp.Ip.ReasmFails = value + procSnmp.Ip.ReasmFails = &value case "FragOKs": - procSnmp.Ip.FragOKs = value + procSnmp.Ip.FragOKs = &value case "FragFails": - procSnmp.Ip.FragFails = value + procSnmp.Ip.FragFails = &value case "FragCreates": - procSnmp.Ip.FragCreates = value + procSnmp.Ip.FragCreates = &value } case "Icmp": switch key { case "InMsgs": - procSnmp.Icmp.InMsgs = value + procSnmp.Icmp.InMsgs = &value case "InErrors": - procSnmp.Icmp.InErrors = value + procSnmp.Icmp.InErrors = &value case "InCsumErrors": - procSnmp.Icmp.InCsumErrors = value + procSnmp.Icmp.InCsumErrors = &value case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = value + procSnmp.Icmp.InDestUnreachs = &value case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = value + procSnmp.Icmp.InTimeExcds = &value case "InParmProbs": - procSnmp.Icmp.InParmProbs = value + procSnmp.Icmp.InParmProbs = &value case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = value + procSnmp.Icmp.InSrcQuenchs = &value case "InRedirects": - procSnmp.Icmp.InRedirects = value + procSnmp.Icmp.InRedirects = &value case "InEchos": - procSnmp.Icmp.InEchos = value + procSnmp.Icmp.InEchos = &value case "InEchoReps": - procSnmp.Icmp.InEchoReps = value + procSnmp.Icmp.InEchoReps = &value case "InTimestamps": - procSnmp.Icmp.InTimestamps = value + procSnmp.Icmp.InTimestamps = &value case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = value + procSnmp.Icmp.InTimestampReps = &value case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = value + procSnmp.Icmp.InAddrMasks = &value case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = value + procSnmp.Icmp.InAddrMaskReps = &value case "OutMsgs": - procSnmp.Icmp.OutMsgs = value + procSnmp.Icmp.OutMsgs = &value case "OutErrors": - procSnmp.Icmp.OutErrors = value + procSnmp.Icmp.OutErrors = &value case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = value + procSnmp.Icmp.OutDestUnreachs = &value case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = value + procSnmp.Icmp.OutTimeExcds = &value case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = value + procSnmp.Icmp.OutParmProbs = &value case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = value + procSnmp.Icmp.OutSrcQuenchs = &value case "OutRedirects": - procSnmp.Icmp.OutRedirects = value + procSnmp.Icmp.OutRedirects = &value case "OutEchos": - procSnmp.Icmp.OutEchos = value + procSnmp.Icmp.OutEchos = &value case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = value + procSnmp.Icmp.OutEchoReps = &value case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = value + procSnmp.Icmp.OutTimestamps = &value case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = value + procSnmp.Icmp.OutTimestampReps = &value case "OutAddrMasks": - 
procSnmp.Icmp.OutAddrMasks = value + procSnmp.Icmp.OutAddrMasks = &value case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = value + procSnmp.Icmp.OutAddrMaskReps = &value } case "IcmpMsg": switch key { case "InType3": - procSnmp.IcmpMsg.InType3 = value + procSnmp.IcmpMsg.InType3 = &value case "OutType3": - procSnmp.IcmpMsg.OutType3 = value + procSnmp.IcmpMsg.OutType3 = &value } case "Tcp": switch key { case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = value + procSnmp.Tcp.RtoAlgorithm = &value case "RtoMin": - procSnmp.Tcp.RtoMin = value + procSnmp.Tcp.RtoMin = &value case "RtoMax": - procSnmp.Tcp.RtoMax = value + procSnmp.Tcp.RtoMax = &value case "MaxConn": - procSnmp.Tcp.MaxConn = value + procSnmp.Tcp.MaxConn = &value case "ActiveOpens": - procSnmp.Tcp.ActiveOpens = value + procSnmp.Tcp.ActiveOpens = &value case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = value + procSnmp.Tcp.PassiveOpens = &value case "AttemptFails": - procSnmp.Tcp.AttemptFails = value + procSnmp.Tcp.AttemptFails = &value case "EstabResets": - procSnmp.Tcp.EstabResets = value + procSnmp.Tcp.EstabResets = &value case "CurrEstab": - procSnmp.Tcp.CurrEstab = value + procSnmp.Tcp.CurrEstab = &value case "InSegs": - procSnmp.Tcp.InSegs = value + procSnmp.Tcp.InSegs = &value case "OutSegs": - procSnmp.Tcp.OutSegs = value + procSnmp.Tcp.OutSegs = &value case "RetransSegs": - procSnmp.Tcp.RetransSegs = value + procSnmp.Tcp.RetransSegs = &value case "InErrs": - procSnmp.Tcp.InErrs = value + procSnmp.Tcp.InErrs = &value case "OutRsts": - procSnmp.Tcp.OutRsts = value + procSnmp.Tcp.OutRsts = &value case "InCsumErrors": - procSnmp.Tcp.InCsumErrors = value + procSnmp.Tcp.InCsumErrors = &value } case "Udp": switch key { case "InDatagrams": - procSnmp.Udp.InDatagrams = value + procSnmp.Udp.InDatagrams = &value case "NoPorts": - procSnmp.Udp.NoPorts = value + procSnmp.Udp.NoPorts = &value case "InErrors": - procSnmp.Udp.InErrors = value + procSnmp.Udp.InErrors = &value case "OutDatagrams": - procSnmp.Udp.OutDatagrams = value + procSnmp.Udp.OutDatagrams = &value case "RcvbufErrors": - procSnmp.Udp.RcvbufErrors = value + procSnmp.Udp.RcvbufErrors = &value case "SndbufErrors": - procSnmp.Udp.SndbufErrors = value + procSnmp.Udp.SndbufErrors = &value case "InCsumErrors": - procSnmp.Udp.InCsumErrors = value + procSnmp.Udp.InCsumErrors = &value case "IgnoredMulti": - procSnmp.Udp.IgnoredMulti = value + procSnmp.Udp.IgnoredMulti = &value } case "UdpLite": switch key { case "InDatagrams": - procSnmp.UdpLite.InDatagrams = value + procSnmp.UdpLite.InDatagrams = &value case "NoPorts": - procSnmp.UdpLite.NoPorts = value + procSnmp.UdpLite.NoPorts = &value case "InErrors": - procSnmp.UdpLite.InErrors = value + procSnmp.UdpLite.InErrors = &value case "OutDatagrams": - procSnmp.UdpLite.OutDatagrams = value + procSnmp.UdpLite.OutDatagrams = &value case "RcvbufErrors": - procSnmp.UdpLite.RcvbufErrors = value + procSnmp.UdpLite.RcvbufErrors = &value case "SndbufErrors": - procSnmp.UdpLite.SndbufErrors = value + procSnmp.UdpLite.SndbufErrors = &value case "InCsumErrors": - procSnmp.UdpLite.InCsumErrors = value + procSnmp.UdpLite.InCsumErrors = &value case "IgnoredMulti": - procSnmp.UdpLite.IgnoredMulti = value + procSnmp.UdpLite.IgnoredMulti = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go index f611992d52..3059cc6a13 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -36,106 +36,106 @@ type 
ProcSnmp6 struct { } type Ip6 struct { // nolint:revive - InReceives float64 - InHdrErrors float64 - InTooBigErrors float64 - InNoRoutes float64 - InAddrErrors float64 - InUnknownProtos float64 - InTruncatedPkts float64 - InDiscards float64 - InDelivers float64 - OutForwDatagrams float64 - OutRequests float64 - OutDiscards float64 - OutNoRoutes float64 - ReasmTimeout float64 - ReasmReqds float64 - ReasmOKs float64 - ReasmFails float64 - FragOKs float64 - FragFails float64 - FragCreates float64 - InMcastPkts float64 - OutMcastPkts float64 - InOctets float64 - OutOctets float64 - InMcastOctets float64 - OutMcastOctets float64 - InBcastOctets float64 - OutBcastOctets float64 - InNoECTPkts float64 - InECT1Pkts float64 - InECT0Pkts float64 - InCEPkts float64 + InReceives *float64 + InHdrErrors *float64 + InTooBigErrors *float64 + InNoRoutes *float64 + InAddrErrors *float64 + InUnknownProtos *float64 + InTruncatedPkts *float64 + InDiscards *float64 + InDelivers *float64 + OutForwDatagrams *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + FragCreates *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 } type Icmp6 struct { - InMsgs float64 - InErrors float64 - OutMsgs float64 - OutErrors float64 - InCsumErrors float64 - InDestUnreachs float64 - InPktTooBigs float64 - InTimeExcds float64 - InParmProblems float64 - InEchos float64 - InEchoReplies float64 - InGroupMembQueries float64 - InGroupMembResponses float64 - InGroupMembReductions float64 - InRouterSolicits float64 - InRouterAdvertisements float64 - InNeighborSolicits float64 - InNeighborAdvertisements float64 - InRedirects float64 - InMLDv2Reports float64 - OutDestUnreachs float64 - OutPktTooBigs float64 - OutTimeExcds float64 - OutParmProblems float64 - OutEchos float64 - OutEchoReplies float64 - OutGroupMembQueries float64 - OutGroupMembResponses float64 - OutGroupMembReductions float64 - OutRouterSolicits float64 - OutRouterAdvertisements float64 - OutNeighborSolicits float64 - OutNeighborAdvertisements float64 - OutRedirects float64 - OutMLDv2Reports float64 - InType1 float64 - InType134 float64 - InType135 float64 - InType136 float64 - InType143 float64 - OutType133 float64 - OutType135 float64 - OutType136 float64 - OutType143 float64 + InMsgs *float64 + InErrors *float64 + OutMsgs *float64 + OutErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InPktTooBigs *float64 + InTimeExcds *float64 + InParmProblems *float64 + InEchos *float64 + InEchoReplies *float64 + InGroupMembQueries *float64 + InGroupMembResponses *float64 + InGroupMembReductions *float64 + InRouterSolicits *float64 + InRouterAdvertisements *float64 + InNeighborSolicits *float64 + InNeighborAdvertisements *float64 + InRedirects *float64 + InMLDv2Reports *float64 + OutDestUnreachs *float64 + OutPktTooBigs *float64 + OutTimeExcds *float64 + OutParmProblems *float64 + OutEchos *float64 + OutEchoReplies *float64 + OutGroupMembQueries *float64 + OutGroupMembResponses *float64 + OutGroupMembReductions *float64 + OutRouterSolicits *float64 + OutRouterAdvertisements *float64 + OutNeighborSolicits *float64 + OutNeighborAdvertisements *float64 + OutRedirects *float64 + 
OutMLDv2Reports *float64 + InType1 *float64 + InType134 *float64 + InType135 *float64 + InType136 *float64 + InType143 *float64 + OutType133 *float64 + OutType135 *float64 + OutType136 *float64 + OutType143 *float64 } type Udp6 struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } type UdpLite6 struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 } func (p Proc) Snmp6() (ProcSnmp6, error) { @@ -182,197 +182,197 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "Ip6": switch key { case "InReceives": - procSnmp6.Ip6.InReceives = value + procSnmp6.Ip6.InReceives = &value case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = value + procSnmp6.Ip6.InHdrErrors = &value case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = value + procSnmp6.Ip6.InTooBigErrors = &value case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = value + procSnmp6.Ip6.InNoRoutes = &value case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = value + procSnmp6.Ip6.InAddrErrors = &value case "InUnknownProtos": - procSnmp6.Ip6.InUnknownProtos = value + procSnmp6.Ip6.InUnknownProtos = &value case "InTruncatedPkts": - procSnmp6.Ip6.InTruncatedPkts = value + procSnmp6.Ip6.InTruncatedPkts = &value case "InDiscards": - procSnmp6.Ip6.InDiscards = value + procSnmp6.Ip6.InDiscards = &value case "InDelivers": - procSnmp6.Ip6.InDelivers = value + procSnmp6.Ip6.InDelivers = &value case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = value + procSnmp6.Ip6.OutForwDatagrams = &value case "OutRequests": - procSnmp6.Ip6.OutRequests = value + procSnmp6.Ip6.OutRequests = &value case "OutDiscards": - procSnmp6.Ip6.OutDiscards = value + procSnmp6.Ip6.OutDiscards = &value case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = value + procSnmp6.Ip6.OutNoRoutes = &value case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = value + procSnmp6.Ip6.ReasmTimeout = &value case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = value + procSnmp6.Ip6.ReasmReqds = &value case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = value + procSnmp6.Ip6.ReasmOKs = &value case "ReasmFails": - procSnmp6.Ip6.ReasmFails = value + procSnmp6.Ip6.ReasmFails = &value case "FragOKs": - procSnmp6.Ip6.FragOKs = value + procSnmp6.Ip6.FragOKs = &value case "FragFails": - procSnmp6.Ip6.FragFails = value + procSnmp6.Ip6.FragFails = &value case "FragCreates": - procSnmp6.Ip6.FragCreates = value + procSnmp6.Ip6.FragCreates = &value case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = value + procSnmp6.Ip6.InMcastPkts = &value case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = value + procSnmp6.Ip6.OutMcastPkts = &value case "InOctets": - procSnmp6.Ip6.InOctets = value + procSnmp6.Ip6.InOctets = &value case "OutOctets": - procSnmp6.Ip6.OutOctets = value + procSnmp6.Ip6.OutOctets = &value case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = value + procSnmp6.Ip6.InMcastOctets = &value case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = value + procSnmp6.Ip6.OutMcastOctets = &value case 
"InBcastOctets": - procSnmp6.Ip6.InBcastOctets = value + procSnmp6.Ip6.InBcastOctets = &value case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = value + procSnmp6.Ip6.OutBcastOctets = &value case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = value + procSnmp6.Ip6.InNoECTPkts = &value case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = value + procSnmp6.Ip6.InECT1Pkts = &value case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = value + procSnmp6.Ip6.InECT0Pkts = &value case "InCEPkts": - procSnmp6.Ip6.InCEPkts = value + procSnmp6.Ip6.InCEPkts = &value } case "Icmp6": switch key { case "InMsgs": - procSnmp6.Icmp6.InMsgs = value + procSnmp6.Icmp6.InMsgs = &value case "InErrors": - procSnmp6.Icmp6.InErrors = value + procSnmp6.Icmp6.InErrors = &value case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = value + procSnmp6.Icmp6.OutMsgs = &value case "OutErrors": - procSnmp6.Icmp6.OutErrors = value + procSnmp6.Icmp6.OutErrors = &value case "InCsumErrors": - procSnmp6.Icmp6.InCsumErrors = value + procSnmp6.Icmp6.InCsumErrors = &value case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = value + procSnmp6.Icmp6.InDestUnreachs = &value case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = value + procSnmp6.Icmp6.InPktTooBigs = &value case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = value + procSnmp6.Icmp6.InTimeExcds = &value case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = value + procSnmp6.Icmp6.InParmProblems = &value case "InEchos": - procSnmp6.Icmp6.InEchos = value + procSnmp6.Icmp6.InEchos = &value case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = value + procSnmp6.Icmp6.InEchoReplies = &value case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = value + procSnmp6.Icmp6.InGroupMembQueries = &value case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = value + procSnmp6.Icmp6.InGroupMembResponses = &value case "InGroupMembReductions": - procSnmp6.Icmp6.InGroupMembReductions = value + procSnmp6.Icmp6.InGroupMembReductions = &value case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = value + procSnmp6.Icmp6.InRouterSolicits = &value case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = value + procSnmp6.Icmp6.InRouterAdvertisements = &value case "InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = value + procSnmp6.Icmp6.InNeighborSolicits = &value case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = value + procSnmp6.Icmp6.InNeighborAdvertisements = &value case "InRedirects": - procSnmp6.Icmp6.InRedirects = value + procSnmp6.Icmp6.InRedirects = &value case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = value + procSnmp6.Icmp6.InMLDv2Reports = &value case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = value + procSnmp6.Icmp6.OutDestUnreachs = &value case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = value + procSnmp6.Icmp6.OutPktTooBigs = &value case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = value + procSnmp6.Icmp6.OutTimeExcds = &value case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = value + procSnmp6.Icmp6.OutParmProblems = &value case "OutEchos": - procSnmp6.Icmp6.OutEchos = value + procSnmp6.Icmp6.OutEchos = &value case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = value + procSnmp6.Icmp6.OutEchoReplies = &value case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = value + procSnmp6.Icmp6.OutGroupMembQueries = &value case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = value + procSnmp6.Icmp6.OutGroupMembResponses = 
&value case "OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = value + procSnmp6.Icmp6.OutGroupMembReductions = &value case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = value + procSnmp6.Icmp6.OutRouterSolicits = &value case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = value + procSnmp6.Icmp6.OutRouterAdvertisements = &value case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = value + procSnmp6.Icmp6.OutNeighborSolicits = &value case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = value + procSnmp6.Icmp6.OutNeighborAdvertisements = &value case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = value + procSnmp6.Icmp6.OutRedirects = &value case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = value + procSnmp6.Icmp6.OutMLDv2Reports = &value case "InType1": - procSnmp6.Icmp6.InType1 = value + procSnmp6.Icmp6.InType1 = &value case "InType134": - procSnmp6.Icmp6.InType134 = value + procSnmp6.Icmp6.InType134 = &value case "InType135": - procSnmp6.Icmp6.InType135 = value + procSnmp6.Icmp6.InType135 = &value case "InType136": - procSnmp6.Icmp6.InType136 = value + procSnmp6.Icmp6.InType136 = &value case "InType143": - procSnmp6.Icmp6.InType143 = value + procSnmp6.Icmp6.InType143 = &value case "OutType133": - procSnmp6.Icmp6.OutType133 = value + procSnmp6.Icmp6.OutType133 = &value case "OutType135": - procSnmp6.Icmp6.OutType135 = value + procSnmp6.Icmp6.OutType135 = &value case "OutType136": - procSnmp6.Icmp6.OutType136 = value + procSnmp6.Icmp6.OutType136 = &value case "OutType143": - procSnmp6.Icmp6.OutType143 = value + procSnmp6.Icmp6.OutType143 = &value } case "Udp6": switch key { case "InDatagrams": - procSnmp6.Udp6.InDatagrams = value + procSnmp6.Udp6.InDatagrams = &value case "NoPorts": - procSnmp6.Udp6.NoPorts = value + procSnmp6.Udp6.NoPorts = &value case "InErrors": - procSnmp6.Udp6.InErrors = value + procSnmp6.Udp6.InErrors = &value case "OutDatagrams": - procSnmp6.Udp6.OutDatagrams = value + procSnmp6.Udp6.OutDatagrams = &value case "RcvbufErrors": - procSnmp6.Udp6.RcvbufErrors = value + procSnmp6.Udp6.RcvbufErrors = &value case "SndbufErrors": - procSnmp6.Udp6.SndbufErrors = value + procSnmp6.Udp6.SndbufErrors = &value case "InCsumErrors": - procSnmp6.Udp6.InCsumErrors = value + procSnmp6.Udp6.InCsumErrors = &value case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = value + procSnmp6.Udp6.IgnoredMulti = &value } case "UdpLite6": switch key { case "InDatagrams": - procSnmp6.UdpLite6.InDatagrams = value + procSnmp6.UdpLite6.InDatagrams = &value case "NoPorts": - procSnmp6.UdpLite6.NoPorts = value + procSnmp6.UdpLite6.NoPorts = &value case "InErrors": - procSnmp6.UdpLite6.InErrors = value + procSnmp6.UdpLite6.InErrors = &value case "OutDatagrams": - procSnmp6.UdpLite6.OutDatagrams = value + procSnmp6.UdpLite6.OutDatagrams = &value case "RcvbufErrors": - procSnmp6.UdpLite6.RcvbufErrors = value + procSnmp6.UdpLite6.RcvbufErrors = &value case "SndbufErrors": - procSnmp6.UdpLite6.SndbufErrors = value + procSnmp6.UdpLite6.SndbufErrors = &value case "InCsumErrors": - procSnmp6.UdpLite6.InCsumErrors = value + procSnmp6.UdpLite6.InCsumErrors = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 06c556ef96..b278eb2c2d 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -102,6 +102,8 @@ type ProcStat struct { RSS int // Soft limit in bytes on the rss 
of the process. RSSLimit uint64 + // CPU number last executed on. + Processor uint // Real-time scheduling priority, a number in the range 1 to 99 for processes // scheduled under a real-time policy, or 0, for non-real-time processes. RTPriority uint @@ -184,7 +186,7 @@ func (p Proc) Stat() (ProcStat, error) { &ignoreUint64, &ignoreUint64, &ignoreInt64, - &ignoreInt64, + &s.Processor, &s.RTPriority, &s.Policy, &s.DelayAcctBlkIOTicks, diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 594022ded4..3d8c06439a 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -96,10 +96,10 @@ func (p Proc) NewStatus() (ProcStatus, error) { kv := strings.SplitN(line, ":", 2) // removes spaces - k := string(strings.TrimSpace(kv[0])) - v := string(strings.TrimSpace(kv[1])) + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) // removes "kB" - v = string(bytes.Trim([]byte(v), " kB")) + v = strings.TrimSuffix(v, " kB") // value to int when possible // we can skip error check here, 'cause vKBytes is not used when value is a string diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 33f97caa08..586af48af9 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -62,7 +62,7 @@ type Stat struct { // Summed up cpu statistics. CPUTotal CPUStat // Per-CPU statistics. - CPU []CPUStat + CPU map[int64]CPUStat // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. IRQTotal uint64 // Number of times a numbered IRQ was triggered. @@ -170,10 +170,23 @@ func (fs FS) Stat() (Stat, error) { if err != nil { return Stat{}, err } + procStat, err := parseStat(bytes.NewReader(data), fileName) + if err != nil { + return Stat{}, err + } + return procStat, nil +} - stat := Stat{} +// parseStat parses the metrics from /proc/[pid]/stat. +func parseStat(r io.Reader, fileName string) (Stat, error) { + var ( + scanner = bufio.NewScanner(r) + stat = Stat{ + CPU: make(map[int64]CPUStat), + } + err error + ) - scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) @@ -228,9 +241,6 @@ func (fs FS) Stat() (Stat, error) { if cpuID == -1 { stat.CPUTotal = cpuStat } else { - for int64(len(stat.CPU)) <= cpuID { - stat.CPU = append(stat.CPU, CPUStat{}) - } stat.CPU[cpuID] = cpuStat } } diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go new file mode 100644 index 0000000000..f08bfc769d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -0,0 +1,79 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "fmt" + "os" + "strconv" + + fsi "github.com/prometheus/procfs/internal/fs" +) + +// Provide access to /proc/PID/task/TID files, for thread specific values. Since +// such files have the same structure as /proc/PID/ ones, the data structures +// and the parsers for the latter may be reused. + +// AllThreads returns a list of all currently available threads under /proc/PID. +func AllThreads(pid int) (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllThreads(pid) +} + +// AllThreads returns a list of all currently available threads for PID. +func (fs FS) AllThreads(pid int) (Procs, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + d, err := os.Open(taskPath) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + } + + t := Procs{} + for _, n := range names { + tid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)}) + } + + return t, nil +} + +// Thread returns a process for a given PID, TID. +func (fs FS) Thread(pid, tid int) (Proc, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + if _, err := os.Stat(taskPath); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil +} + +// Thread returns a process for a given TID of Proc. +func (proc Proc) Thread(tid int) (Proc, error) { + tfs := fsi.FS(proc.path("task")) + if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: tfs}, nil +} diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index 20ceb77e2d..cdedcae996 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -26,7 +26,9 @@ import ( ) // The VM interface is described at -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// // Each setting is exposed as a single file. // Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array // and numa_zonelist_order (deprecated) which is a string. diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/api/api.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/api/api.go deleted file mode 100644 index 5d74cae493..0000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/api/api.go +++ /dev/null @@ -1,39 +0,0 @@ -package api - -import "net" - -const ( - // Version of the REST API, not implementation version. - // See openapi.yaml for the definition. 
- Version = "1.1.1" -) - -// ErrorJSON is returned with "application/json" content type and non-2XX status code -type ErrorJSON struct { - Message string `json:"message"` -} - -// Info is the structure returned by `GET /info` -type Info struct { - APIVersion string `json:"apiVersion"` // REST API version - Version string `json:"version"` // Implementation version - StateDir string `json:"stateDir"` - ChildPID int `json:"childPID"` - NetworkDriver *NetworkDriverInfo `json:"networkDriver,omitempty"` - PortDriver *PortDriverInfo `json:"portDriver,omitempty"` -} - -// NetworkDriverInfo in Info -type NetworkDriverInfo struct { - Driver string `json:"driver"` - DNS []net.IP `json:"dns,omitempty"` - ChildIP net.IP `json:"childIP,omitempty"` // since API v1.1.1 (RootlessKit v0.14.1) - DynamicChildIP bool `json:"dynamicChildIP,omitempty"` // since API v1.1.1 -} - -// PortDriverInfo in Info -type PortDriverInfo struct { - Driver string `json:"driver"` - Protos []string `json:"protos"` - DisallowLoopbackChildIP bool `json:"disallowLoopbackChildIP,omitempty"` // since API v1.1.1 -} diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/api/client/client.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/api/client/client.go deleted file mode 100644 index 53dcadd5e4..0000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/api/client/client.go +++ /dev/null @@ -1,212 +0,0 @@ -package client - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net" - "net/http" - "os" - - "github.com/rootless-containers/rootlesskit/pkg/api" - "github.com/rootless-containers/rootlesskit/pkg/port" -) - -type Client interface { - HTTPClient() *http.Client - PortManager() port.Manager - Info(context.Context) (*api.Info, error) -} - -// New creates a client. -// socketPath is a path to the UNIX socket, without unix:// prefix. 
-func New(socketPath string) (Client, error) { - if _, err := os.Stat(socketPath); err != nil { - return nil, err - } - hc := &http.Client{ - Transport: &http.Transport{ - DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { - var d net.Dialer - return d.DialContext(ctx, "unix", socketPath) - }, - }, - } - return NewWithHTTPClient(hc), nil -} - -func NewWithHTTPClient(hc *http.Client) Client { - return &client{ - Client: hc, - version: "v1", - dummyHost: "rootlesskit", - } -} - -type client struct { - *http.Client - // version is always "v1" - // TODO(AkihiroSuda): negotiate the version - version string - dummyHost string -} - -func (c *client) HTTPClient() *http.Client { - return c.Client -} - -func (c *client) PortManager() port.Manager { - return &portManager{ - client: c, - } -} - -func (c *client) Info(ctx context.Context) (*api.Info, error) { - u := fmt.Sprintf("http://%s/%s/info", c.dummyHost, c.version) - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - resp, err := c.HTTPClient().Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if err := successful(resp); err != nil { - return nil, err - } - var info api.Info - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&info); err != nil { - return nil, err - } - return &info, nil -} - -func readAtMost(r io.Reader, maxBytes int) ([]byte, error) { - lr := &io.LimitedReader{ - R: r, - N: int64(maxBytes), - } - b, err := io.ReadAll(lr) - if err != nil { - return b, err - } - if lr.N == 0 { - return b, fmt.Errorf("expected at most %d bytes, got more", maxBytes) - } - return b, nil -} - -// HTTPStatusErrorBodyMaxLength specifies the maximum length of HTTPStatusError.Body -const HTTPStatusErrorBodyMaxLength = 64 * 1024 - -// HTTPStatusError is created from non-2XX HTTP response -type HTTPStatusError struct { - // StatusCode is non-2XX status code - StatusCode int - // Body is at most HTTPStatusErrorBodyMaxLength - Body string -} - -// Error implements error. -// If e.Body is a marshalled string of api.ErrorJSON, Error returns ErrorJSON.Message . -// Otherwise Error returns a human-readable string that contains e.StatusCode and e.Body. 
-func (e *HTTPStatusError) Error() string { - if e.Body != "" && len(e.Body) < HTTPStatusErrorBodyMaxLength { - var ej api.ErrorJSON - if json.Unmarshal([]byte(e.Body), &ej) == nil { - return ej.Message - } - } - return fmt.Sprintf("unexpected HTTP status %s, body=%q", http.StatusText(e.StatusCode), e.Body) -} - -func successful(resp *http.Response) error { - if resp == nil { - return errors.New("nil response") - } - if resp.StatusCode/100 != 2 { - b, _ := readAtMost(resp.Body, HTTPStatusErrorBodyMaxLength) - return &HTTPStatusError{ - StatusCode: resp.StatusCode, - Body: string(b), - } - } - return nil -} - -type portManager struct { - *client -} - -func (pm *portManager) AddPort(ctx context.Context, spec port.Spec) (*port.Status, error) { - m, err := json.Marshal(spec) - if err != nil { - return nil, err - } - u := fmt.Sprintf("http://%s/%s/ports", pm.client.dummyHost, pm.client.version) - req, err := http.NewRequest("POST", u, bytes.NewReader(m)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - req = req.WithContext(ctx) - resp, err := pm.client.HTTPClient().Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if err := successful(resp); err != nil { - return nil, err - } - dec := json.NewDecoder(resp.Body) - var status port.Status - if err := dec.Decode(&status); err != nil { - return nil, err - } - return &status, nil -} -func (pm *portManager) ListPorts(ctx context.Context) ([]port.Status, error) { - u := fmt.Sprintf("http://%s/%s/ports", pm.client.dummyHost, pm.client.version) - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - resp, err := pm.client.HTTPClient().Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if err := successful(resp); err != nil { - return nil, err - } - var statuses []port.Status - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&statuses); err != nil { - return nil, err - } - return statuses, nil -} -func (pm *portManager) RemovePort(ctx context.Context, id int) error { - u := fmt.Sprintf("http://%s/%s/ports/%d", pm.client.dummyHost, pm.client.version, id) - req, err := http.NewRequest("DELETE", u, nil) - if err != nil { - return err - } - req = req.WithContext(ctx) - resp, err := pm.client.HTTPClient().Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - if err := successful(resp); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/api/openapi.yaml b/vendor/github.com/rootless-containers/rootlesskit/pkg/api/openapi.yaml deleted file mode 100644 index dffc496808..0000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/api/openapi.yaml +++ /dev/null @@ -1,171 +0,0 @@ -# When you made a change to this YAML, please validate with https://editor.swagger.io -openapi: 3.0.3 -info: - version: 1.1.1 - title: RootlessKit API -servers: - - url: 'http://rootlesskit/v1' - description: Local UNIX socket server. The host part of the URL is ignored. -paths: -# /info: API >= 1.1.0 - /info: - get: - responses: - '200': - description: Info. Available since API 1.1.0. 
- content: - application/json: - schema: - $ref: '#/components/schemas/Info' - /ports: - get: - responses: - '200': - description: An array of PortStatus - content: - application/json: - schema: - $ref: '#/components/schemas/PortStatuses' - post: - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/PortSpec' - responses: - '201': - description: PortStatus with ID - content: - application/json: - schema: - $ref: '#/components/schemas/PortStatus' - '/ports/{id}': - delete: - parameters: - - name: id - in: path - required: true - schema: - type: integer - format: int64 - responses: - '200': - description: Null response -components: - schemas: - Proto: - type: string - description: "protocol for listening. Corresponds to Go's net.Listen. The strings with \"4\" and \"6\" suffixes were introduced in API 1.1.0." - enum: - - tcp - - tcp4 - - tcp6 - - udp - - udp4 - - udp6 - - sctp - - sctp4 - - sctp6 - PortSpec: - required: - - proto - properties: - proto: - $ref: '#/components/schemas/Proto' - parentIP: - type: string - parentPort: - type: integer - format: int32 - minimum: 1 - maximum: 65535 - childIP: - type: string -# future version may support requests with parentPort<=0 for automatic port assignment - childPort: - type: integer - format: int32 - minimum: 1 - maximum: 65535 - PortStatus: - required: - - id - properties: - id: - type: integer - format: int64 - spec: - $ref: '#/components/schemas/PortSpec' - PortStatuses: - type: array - items: - $ref: '#/components/schemas/PortStatus' -# Info: API >= 1.1.0 - Info: - required: - - apiVersion - - version - - stateDir - - childPID - properties: - apiVersion: - type: string - description: "API version, without \"v\" prefix" - example: "1.1.0" - version: - type: string - description: "Implementation version, without \"v\" prefix" - example: "0.42.0-beta.1+dev" - stateDir: - type: string - description: "state dir" - example: "/run/user/1000/rootlesskit" - childPID: - type: integer - description: "child PID" - example: 10042 - networkDriver: - $ref: '#/components/schemas/NetworkDriverInfo' - portDriver: - $ref: '#/components/schemas/PortDriverInfo' - NetworkDriverInfo: - required: - - driver - properties: - driver: - type: string - description: "network driver. Empty when --net=host." 
- example: "slirp4netns" -# TODO: return TAP info - dns: - type: array - description: "DNS addresses" - items: - type: string - example: ["10.0.2.3"] - childIP: - type: string - description: "Child IP (v4)" - example: "10.0.2.100" - dynamicChildIP: - type: boolean - description: "Child IP may change" - PortDriverInfo: - required: - - driver - - supportedProtos - properties: - driver: - type: string - description: "port driver" - example: "builtin" - protos: - type: array - description: "The supported protocol strings for listening ports" - example: ["tcp","udp"] - items: - $ref: '#/components/schemas/Proto' - disallowLoopbackChildIP: - type: boolean - description: "If this field is set to true, loopback IP such as 127.0.0.1 cannot be specified as a child IP" diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go deleted file mode 100644 index c95bfc7c74..0000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go +++ /dev/null @@ -1,61 +0,0 @@ -package port - -import ( - "context" - "net" - - "github.com/rootless-containers/rootlesskit/pkg/api" -) - -type Spec struct { - // Proto is one of ["tcp", "tcp4", "tcp6", "udp", "udp4", "udp6"]. - // "tcp" may cause listening on both IPv4 and IPv6. (Corresponds to Go's net.Listen .) - Proto string `json:"proto,omitempty"` - ParentIP string `json:"parentIP,omitempty"` // IPv4 or IPv6 address. can be empty (0.0.0.0). - ParentPort int `json:"parentPort,omitempty"` - ChildPort int `json:"childPort,omitempty"` - // ChildIP is an IPv4 or IPv6 address. - // Default values: - // - builtin driver: 127.0.0.1 - // - slirp4netns driver: slirp4netns's child IP, e.g., 10.0.2.100 - ChildIP string `json:"childIP,omitempty"` -} - -type Status struct { - ID int `json:"id"` - Spec Spec `json:"spec"` -} - -// Manager MUST be thread-safe. -type Manager interface { - AddPort(ctx context.Context, spec Spec) (*Status, error) - ListPorts(ctx context.Context) ([]Status, error) - RemovePort(ctx context.Context, id int) error -} - -// ChildContext is used for RunParentDriver -type ChildContext struct { - // PID of the child, can be used for ns-entering to the child namespaces. - PID int - // IP of the tap device - IP net.IP -} - -// ParentDriver is a driver for the parent process. -type ParentDriver interface { - Manager - Info(ctx context.Context) (*api.PortDriverInfo, error) - // OpaqueForChild typically consists of socket path - // for controlling child from parent - OpaqueForChild() map[string]string - // RunParentDriver signals initComplete when ParentDriver is ready to - // serve as Manager. - // RunParentDriver blocks until quit is signaled. - // - // ChildContext is optional. 
- RunParentDriver(initComplete chan struct{}, quit <-chan struct{}, cctx *ChildContext) error -} - -type ChildDriver interface { - RunChildDriver(opaque map[string]string, quit <-chan struct{}) error -} diff --git a/vendor/github.com/tonistiigi/fsutil/.gitignore b/vendor/github.com/tonistiigi/fsutil/.gitignore deleted file mode 100644 index 51b9602c84..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -# if you want to ignore files created by your editor/tools, consider using a -# global .gitignore or .git/info/exclude see https://help.github.com/articles/ignoring-files -.* -!.github -!.gitignore -!.travis.yml -*.prof -# support running go modules in vendor mode for local development -vendor/ diff --git a/vendor/github.com/tonistiigi/fsutil/Dockerfile b/vendor/github.com/tonistiigi/fsutil/Dockerfile deleted file mode 100644 index 252b497638..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/Dockerfile +++ /dev/null @@ -1,30 +0,0 @@ -#syntax=docker/dockerfile:1 -ARG GO_VERSION=1.18 - -FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.1.0 AS xx - -FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS base -RUN apk add --no-cache git -COPY --from=xx / / -WORKDIR /src - -FROM base AS build -ARG TARGETPLATFORM -RUN --mount=target=. --mount=target=/go/pkg/mod,type=cache \ - --mount=target=/root/.cache,type=cache \ - xx-go build ./... - -FROM base AS test -ARG TESTFLAGS -RUN --mount=target=. --mount=target=/go/pkg/mod,type=cache \ - --mount=target=/root/.cache,type=cache \ - CGO_ENABLED=0 xx-go test -test.v ${TESTFLAGS} ./... - -FROM base AS test-noroot -RUN mkdir /go/pkg && chmod 0777 /go/pkg -USER 1000:1000 -RUN --mount=target=. \ - --mount=target=/tmp/.cache,type=cache \ - CGO_ENABLED=0 GOCACHE=/tmp/gocache xx-go test -test.v ./... - -FROM build diff --git a/vendor/github.com/tonistiigi/fsutil/LICENSE b/vendor/github.com/tonistiigi/fsutil/LICENSE deleted file mode 100644 index 7df441d93e..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -MIT - -Copyright 2017 Tõnis Tiigi - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go b/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go deleted file mode 100644 index dd65a49ad1..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build linux -// +build linux - -package fsutil - -import ( - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func chtimes(path string, un int64) error { - var utimes [2]unix.Timespec - utimes[0] = unix.NsecToTimespec(un) - utimes[1] = utimes[0] - - if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrap(err, "failed call to UtimesNanoAt") - } - - return nil -} diff --git a/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go b/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go deleted file mode 100644 index a3ba09881d..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !linux - -package fsutil - -import ( - "os" - "time" - - "github.com/pkg/errors" -) - -func chtimes(path string, un int64) error { - mtime := time.Unix(0, un) - fi, err := os.Lstat(path) - if err != nil { - return errors.WithStack(err) - } - if fi.Mode()&os.ModeSymlink != 0 { - return nil - } - return errors.WithStack(os.Chtimes(path, mtime, mtime)) -} diff --git a/vendor/github.com/tonistiigi/fsutil/diff.go b/vendor/github.com/tonistiigi/fsutil/diff.go deleted file mode 100644 index a7405dc533..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/diff.go +++ /dev/null @@ -1,51 +0,0 @@ -package fsutil - -import ( - "context" - "hash" - "os" - - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -type walkerFn func(ctx context.Context, pathC chan<- *currentPath) error - -func Changes(ctx context.Context, a, b walkerFn, changeFn ChangeFunc) error { - return nil -} - -type HandleChangeFn func(ChangeKind, string, os.FileInfo, error) error - -type ContentHasher func(*types.Stat) (hash.Hash, error) - -func getWalkerFn(root string) walkerFn { - return func(ctx context.Context, pathC chan<- *currentPath) error { - return errors.Wrap(Walk(ctx, root, nil, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - stat, ok := f.Sys().(*types.Stat) - if !ok { - return errors.Errorf("%T invalid file without stat information", f.Sys()) - } - - p := ¤tPath{ - path: path, - stat: stat, - } - - select { - case <-ctx.Done(): - return ctx.Err() - case pathC <- p: - return nil - } - }), "failed to walk") - } -} - -func emptyWalker(ctx context.Context, pathC chan<- *currentPath) error { - return nil -} diff --git a/vendor/github.com/tonistiigi/fsutil/diff_containerd.go b/vendor/github.com/tonistiigi/fsutil/diff_containerd.go deleted file mode 100644 index d8619abf1c..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/diff_containerd.go +++ /dev/null @@ -1,253 +0,0 @@ -package fsutil - -import ( - "bytes" - "context" - "io" - "os" - "strings" - - "github.com/tonistiigi/fsutil/types" - "golang.org/x/sync/errgroup" -) - -// Everything below is copied from containerd/fs. TODO: remove duplication @dmcgowan - -// Const redefined because containerd/fs doesn't build on !linux - -// ChangeKind is the type of modification that -// a change is making. 
-type ChangeKind int - -const ( - // ChangeKindAdd represents an addition of - // a file - ChangeKindAdd ChangeKind = iota - - // ChangeKindModify represents a change to - // an existing file - ChangeKindModify - - // ChangeKindDelete represents a delete of - // a file - ChangeKindDelete -) - -func (k ChangeKind) String() string { - switch k { - case ChangeKindAdd: - return "add" - case ChangeKindModify: - return "modify" - case ChangeKindDelete: - return "delete" - default: - return "unknown" - } -} - -// ChangeFunc is the type of function called for each change -// computed during a directory changes calculation. -type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error - -const compareChunkSize = 32 * 1024 - -type currentPath struct { - path string - stat *types.Stat - // fullPath string -} - -// doubleWalkDiff walks both directories to create a diff -func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b walkerFn, filter FilterFunc, differ DiffType) (err error) { - g, ctx := errgroup.WithContext(ctx) - - var ( - c1 = make(chan *currentPath, 128) - c2 = make(chan *currentPath, 128) - - f1, f2 *currentPath - rmdir string - ) - g.Go(func() error { - defer close(c1) - return a(ctx, c1) - }) - g.Go(func() error { - defer close(c2) - return b(ctx, c2) - }) - g.Go(func() error { - loop0: - for c1 != nil || c2 != nil { - if f1 == nil && c1 != nil { - f1, err = nextPath(ctx, c1) - if err != nil { - return err - } - if f1 == nil { - c1 = nil - } - } - - if f2 == nil && c2 != nil { - f2, err = nextPath(ctx, c2) - if err != nil { - return err - } - if f2 == nil { - c2 = nil - } - } - if f1 == nil && f2 == nil { - continue - } - - var f *types.Stat - var f2copy *currentPath - if f2 != nil { - statCopy := *f2.stat - if filter != nil { - filter(f2.path, &statCopy) - } - f2copy = ¤tPath{path: f2.path, stat: &statCopy} - } - k, p := pathChange(f1, f2copy) - switch k { - case ChangeKindAdd: - if rmdir != "" { - rmdir = "" - } - f = f2.stat - f2 = nil - case ChangeKindDelete: - // Check if this file is already removed by being - // under of a removed directory - if rmdir != "" && strings.HasPrefix(f1.path, rmdir) { - f1 = nil - continue - } else if rmdir == "" && f1.stat.IsDir() { - rmdir = f1.path + string(os.PathSeparator) - } else if rmdir != "" { - rmdir = "" - } - f1 = nil - case ChangeKindModify: - same, err := sameFile(f1, f2copy, differ) - if err != nil { - return err - } - if f1.stat.IsDir() && !f2copy.stat.IsDir() { - rmdir = f1.path + string(os.PathSeparator) - } else if rmdir != "" { - rmdir = "" - } - f = f2.stat - f1 = nil - f2 = nil - if same { - continue loop0 - } - } - if err := changeFn(k, p, &StatInfo{f}, nil); err != nil { - return err - } - } - return nil - }) - - return g.Wait() -} - -func pathChange(lower, upper *currentPath) (ChangeKind, string) { - if lower == nil { - if upper == nil { - panic("cannot compare nil paths") - } - return ChangeKindAdd, upper.path - } - if upper == nil { - return ChangeKindDelete, lower.path - } - - switch i := ComparePath(lower.path, upper.path); { - case i < 0: - // File in lower that is not in upper - return ChangeKindDelete, lower.path - case i > 0: - // File in upper that is not in lower - return ChangeKindAdd, upper.path - default: - return ChangeKindModify, upper.path - } -} - -func sameFile(f1, f2 *currentPath, differ DiffType) (same bool, retErr error) { - if differ == DiffNone { - return false, nil - } - // If not a directory also check size, modtime, and content - if !f1.stat.IsDir() { - if f1.stat.Size_ != 
f2.stat.Size_ { - return false, nil - } - - if f1.stat.ModTime != f2.stat.ModTime { - return false, nil - } - } - - same, err := compareStat(f1.stat, f2.stat) - if err != nil || !same || differ == DiffMetadata { - return same, err - } - return compareFileContent(f1.path, f2.path) -} - -func compareFileContent(p1, p2 string) (bool, error) { - f1, err := os.Open(p1) - if err != nil { - return false, err - } - defer f1.Close() - f2, err := os.Open(p2) - if err != nil { - return false, err - } - defer f2.Close() - - b1 := make([]byte, compareChunkSize) - b2 := make([]byte, compareChunkSize) - for { - n1, err1 := f1.Read(b1) - if err1 != nil && err1 != io.EOF { - return false, err1 - } - n2, err2 := f2.Read(b2) - if err2 != nil && err2 != io.EOF { - return false, err2 - } - if n1 != n2 || !bytes.Equal(b1[:n1], b2[:n2]) { - return false, nil - } - if err1 == io.EOF && err2 == io.EOF { - return true, nil - } - } -} - -// compareStat returns whether the stats are equivalent, -// whether the files are considered the same file, and -// an error -func compareStat(ls1, ls2 *types.Stat) (bool, error) { - return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Devmajor == ls2.Devmajor && ls1.Devminor == ls2.Devminor && ls1.Linkname == ls2.Linkname, nil -} - -func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case p := <-pathC: - return p, nil - } -} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter.go b/vendor/github.com/tonistiigi/fsutil/diskwriter.go deleted file mode 100644 index b822644ddc..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter.go +++ /dev/null @@ -1,365 +0,0 @@ -package fsutil - -import ( - "context" - "hash" - "io" - gofs "io/fs" - "os" - "path/filepath" - "strconv" - "sync" - "syscall" - "time" - - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" - "golang.org/x/sync/errgroup" -) - -type WriteToFunc func(context.Context, string, io.WriteCloser) error - -type DiskWriterOpt struct { - AsyncDataCb WriteToFunc - SyncDataCb WriteToFunc - NotifyCb func(ChangeKind, string, os.FileInfo, error) error - ContentHasher ContentHasher - Filter FilterFunc -} - -type FilterFunc func(string, *types.Stat) bool - -type DiskWriter struct { - opt DiskWriterOpt - dest string - - ctx context.Context - cancel func() - eg *errgroup.Group - filter FilterFunc - dirModTimes map[string]int64 -} - -func NewDiskWriter(ctx context.Context, dest string, opt DiskWriterOpt) (*DiskWriter, error) { - if opt.SyncDataCb == nil && opt.AsyncDataCb == nil { - return nil, errors.New("no data callback specified") - } - if opt.SyncDataCb != nil && opt.AsyncDataCb != nil { - return nil, errors.New("can't specify both sync and async data callbacks") - } - - ctx, cancel := context.WithCancel(ctx) - eg, ctx := errgroup.WithContext(ctx) - - return &DiskWriter{ - opt: opt, - dest: dest, - eg: eg, - ctx: ctx, - cancel: cancel, - filter: opt.Filter, - dirModTimes: map[string]int64{}, - }, nil -} - -func (dw *DiskWriter) Wait(ctx context.Context) error { - if err := dw.eg.Wait(); err != nil { - return err - } - return filepath.WalkDir(dw.dest, func(path string, d gofs.DirEntry, prevErr error) error { - if prevErr != nil { - return prevErr - } - if !d.IsDir() { - return nil - } - if mtime, ok := dw.dirModTimes[path]; ok { - return chtimes(path, mtime) - } - return nil - }) -} - -func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi 
os.FileInfo, err error) (retErr error) { - if err != nil { - return err - } - - select { - case <-dw.ctx.Done(): - return dw.ctx.Err() - default: - } - - defer func() { - if retErr != nil { - dw.cancel() - } - }() - - destPath := filepath.Join(dw.dest, filepath.FromSlash(p)) - - if kind == ChangeKindDelete { - if dw.filter != nil { - var empty types.Stat - if ok := dw.filter(p, &empty); !ok { - return nil - } - } - // todo: no need to validate if diff is trusted but is it always? - if err := os.RemoveAll(destPath); err != nil { - return errors.Wrapf(err, "failed to remove: %s", destPath) - } - if dw.opt.NotifyCb != nil { - if err := dw.opt.NotifyCb(kind, p, nil, nil); err != nil { - return err - } - } - return nil - } - - stat, ok := fi.Sys().(*types.Stat) - if !ok { - return errors.WithStack(&os.PathError{Path: p, Err: syscall.EBADMSG, Op: "change without stat info"}) - } - - statCopy := *stat - - if dw.filter != nil { - if ok := dw.filter(p, &statCopy); !ok { - return nil - } - } - - rename := true - oldFi, err := os.Lstat(destPath) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - if kind != ChangeKindAdd { - return errors.Wrap(err, "modify/rm") - } - rename = false - } else { - return errors.WithStack(err) - } - } - - if oldFi != nil && fi.IsDir() && oldFi.IsDir() { - if err := rewriteMetadata(destPath, &statCopy); err != nil { - return errors.Wrapf(err, "error setting dir metadata for %s", destPath) - } - return nil - } - - newPath := destPath - if rename { - newPath = filepath.Join(filepath.Dir(destPath), ".tmp."+nextSuffix()) - } - - isRegularFile := false - - switch { - case fi.IsDir(): - if err := os.Mkdir(newPath, fi.Mode()); err != nil { - return errors.Wrapf(err, "failed to create dir %s", newPath) - } - dw.dirModTimes[destPath] = statCopy.ModTime - case fi.Mode()&os.ModeDevice != 0 || fi.Mode()&os.ModeNamedPipe != 0: - if err := handleTarTypeBlockCharFifo(newPath, &statCopy); err != nil { - return errors.Wrapf(err, "failed to create device %s", newPath) - } - case fi.Mode()&os.ModeSymlink != 0: - if err := os.Symlink(statCopy.Linkname, newPath); err != nil { - return errors.Wrapf(err, "failed to symlink %s", newPath) - } - case statCopy.Linkname != "": - if err := os.Link(filepath.Join(dw.dest, statCopy.Linkname), newPath); err != nil { - return errors.Wrapf(err, "failed to link %s to %s", newPath, statCopy.Linkname) - } - default: - isRegularFile = true - file, err := os.OpenFile(newPath, os.O_CREATE|os.O_WRONLY, fi.Mode()) //todo: windows - if err != nil { - return errors.Wrapf(err, "failed to create %s", newPath) - } - if dw.opt.SyncDataCb != nil { - if err := dw.processChange(ChangeKindAdd, p, fi, file); err != nil { - file.Close() - return err - } - break - } - if err := file.Close(); err != nil { - return errors.Wrapf(err, "failed to close %s", newPath) - } - } - - if err := rewriteMetadata(newPath, &statCopy); err != nil { - return errors.Wrapf(err, "error setting metadata for %s", newPath) - } - - if rename { - if oldFi.IsDir() != fi.IsDir() { - if err := os.RemoveAll(destPath); err != nil { - return errors.Wrapf(err, "failed to remove %s", destPath) - } - } - if err := os.Rename(newPath, destPath); err != nil { - return errors.Wrapf(err, "failed to rename %s to %s", newPath, destPath) - } - } - - if isRegularFile { - if dw.opt.AsyncDataCb != nil { - dw.requestAsyncFileData(p, destPath, fi, &statCopy) - } - } else { - return dw.processChange(kind, p, fi, nil) - } - - return nil -} - -func (dw *DiskWriter) requestAsyncFileData(p, dest string, fi os.FileInfo, st 
*types.Stat) { - // todo: limit worker threads - dw.eg.Go(func() error { - if err := dw.processChange(ChangeKindAdd, p, fi, &lazyFileWriter{ - dest: dest, - }); err != nil { - return err - } - return chtimes(dest, st.ModTime) // TODO: parent dirs - }) -} - -func (dw *DiskWriter) processChange(kind ChangeKind, p string, fi os.FileInfo, w io.WriteCloser) error { - origw := w - var hw *hashedWriter - if dw.opt.NotifyCb != nil { - var err error - if hw, err = newHashWriter(dw.opt.ContentHasher, fi, w); err != nil { - return err - } - w = hw - } - if origw != nil { - fn := dw.opt.SyncDataCb - if fn == nil && dw.opt.AsyncDataCb != nil { - fn = dw.opt.AsyncDataCb - } - if err := fn(dw.ctx, p, w); err != nil { - return err - } - } else { - if hw != nil { - hw.Close() - } - } - if hw != nil { - return dw.opt.NotifyCb(kind, p, hw, nil) - } - return nil -} - -type hashedWriter struct { - os.FileInfo - io.Writer - h hash.Hash - w io.WriteCloser - dgst digest.Digest -} - -func newHashWriter(ch ContentHasher, fi os.FileInfo, w io.WriteCloser) (*hashedWriter, error) { - stat, ok := fi.Sys().(*types.Stat) - if !ok { - return nil, errors.Errorf("invalid change without stat information") - } - - h, err := ch(stat) - if err != nil { - return nil, err - } - hw := &hashedWriter{ - FileInfo: fi, - Writer: io.MultiWriter(w, h), - h: h, - w: w, - } - return hw, nil -} - -func (hw *hashedWriter) Close() error { - hw.dgst = digest.NewDigest(digest.SHA256, hw.h) - if hw.w != nil { - return hw.w.Close() - } - return nil -} - -func (hw *hashedWriter) Digest() digest.Digest { - return hw.dgst -} - -type lazyFileWriter struct { - dest string - f *os.File - fileMode *os.FileMode -} - -func (lfw *lazyFileWriter) Write(dt []byte) (int, error) { - if lfw.f == nil { - file, err := os.OpenFile(lfw.dest, os.O_WRONLY, 0) //todo: windows - if os.IsPermission(err) { - // retry after chmod - fi, er := os.Stat(lfw.dest) - if er == nil { - mode := fi.Mode() - lfw.fileMode = &mode - er = os.Chmod(lfw.dest, mode|0222) - if er == nil { - file, err = os.OpenFile(lfw.dest, os.O_WRONLY, 0) - } - } - } - if err != nil { - return 0, errors.Wrapf(err, "failed to open %s", lfw.dest) - } - lfw.f = file - } - return lfw.f.Write(dt) -} - -func (lfw *lazyFileWriter) Close() error { - var err error - if lfw.f != nil { - err = lfw.f.Close() - } - if err == nil && lfw.fileMode != nil { - err = os.Chmod(lfw.dest, *lfw.fileMode) - } - return err -} - -// Random number state. -// We generate random temporary file names so that there's a good -// chance the file doesn't exist yet - keeps the number of tries in -// TempFile to a minimum. 
-var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_freebsd.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_freebsd.go deleted file mode 100644 index ed6356fabe..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter_freebsd.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build freebsd -// +build freebsd - -package fsutil - -import ( - "github.com/tonistiigi/fsutil/types" - "golang.org/x/sys/unix" -) - -func createSpecialFile(path string, mode uint32, stat *types.Stat) error { - return unix.Mknod(path, mode, mkdev(stat.Devmajor, stat.Devminor)) -} - -func mkdev(major int64, minor int64) uint64 { - return unix.Mkdev(uint32(major), uint32(minor)) -} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go deleted file mode 100644 index 1d97d6f9d7..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build !windows -// +build !windows - -package fsutil - -import ( - "os" - "syscall" - - "github.com/containerd/continuity/sysx" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -func rewriteMetadata(p string, stat *types.Stat) error { - for key, value := range stat.Xattrs { - sysx.Setxattr(p, key, value, 0) - } - - if err := os.Lchown(p, int(stat.Uid), int(stat.Gid)); err != nil { - return errors.WithStack(err) - } - - if os.FileMode(stat.Mode)&os.ModeSymlink == 0 { - if err := os.Chmod(p, os.FileMode(stat.Mode)); err != nil { - return errors.WithStack(err) - } - } - - if err := chtimes(p, stat.ModTime); err != nil { - return err - } - - return nil -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(path string, stat *types.Stat) error { - mode := uint32(stat.Mode & 07777) - if os.FileMode(stat.Mode)&os.ModeCharDevice != 0 { - mode |= syscall.S_IFCHR - } else if os.FileMode(stat.Mode)&os.ModeNamedPipe != 0 { - mode |= syscall.S_IFIFO - } else { - mode |= syscall.S_IFBLK - } - - if err := createSpecialFile(path, mode, stat); err != nil { - return errors.WithStack(err) - } - return nil -} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_unixnobsd.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_unixnobsd.go deleted file mode 100644 index 927dba4602..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter_unixnobsd.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build !windows && !freebsd -// +build !windows,!freebsd - -package fsutil - -import ( - "github.com/tonistiigi/fsutil/types" - "golang.org/x/sys/unix" -) - -func createSpecialFile(path string, mode uint32, stat *types.Stat) error { - return unix.Mknod(path, mode, mkdev(stat.Devmajor, stat.Devminor)) -} - -func mkdev(major int64, minor int64) int { - return int(unix.Mkdev(uint32(major), uint32(minor))) -} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go deleted file mode 100644 index 036544f0b6..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// 
+build windows - -package fsutil - -import ( - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -func rewriteMetadata(p string, stat *types.Stat) error { - return chtimes(p, stat.ModTime) -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(path string, stat *types.Stat) error { - return errors.New("Not implemented on windows") -} diff --git a/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl b/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl deleted file mode 100644 index 3d7d182c3c..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl +++ /dev/null @@ -1,67 +0,0 @@ -variable "GO_VERSION" { - default = "1.18" -} - -group "default" { - targets = ["build"] -} - -target "build" { - args = { - GO_VERSION = "${GO_VERSION}" - } -} - -group "test" { - targets = ["test-root", "test-noroot"] -} - -target "test-root" { - inherits = ["build"] - target = "test" -} - -target "test-noroot" { - inherits = ["build"] - target = "test-noroot" -} - -target "lint" { - dockerfile = "./hack/dockerfiles/lint.Dockerfile" - args = { - GO_VERSION = "${GO_VERSION}" - } -} - -target "validate-gomod" { - dockerfile = "./hack/dockerfiles/gomod.Dockerfile" - target = "validate" - args = { - # go mod may produce different results between go versions, - # if this becomes a problem, this should be switched to use - # a fixed go version. - GO_VERSION = "${GO_VERSION}" - } -} - -target "gomod" { - inherits = ["validate-gomod"] - output = ["."] - target = "update" -} - -target "validate-shfmt" { - dockerfile = "./hack/dockerfiles/shfmt.Dockerfile" - target = "validate" -} - -target "shfmt" { - inherits = ["validate-shfmt"] - output = ["."] - target = "update" -} - -target "cross" { - inherits = ["build"] - platforms = ["linux/amd64", "linux/386", "linux/arm64", "linux/arm", "linux/ppc64le", "linux/s390x", "darwin/amd64", "darwin/arm64", "windows/amd64", "windows/arm64", "freebsd/amd64", "freebsd/arm64"] -} diff --git a/vendor/github.com/tonistiigi/fsutil/followlinks.go b/vendor/github.com/tonistiigi/fsutil/followlinks.go deleted file mode 100644 index 136a908211..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/followlinks.go +++ /dev/null @@ -1,149 +0,0 @@ -package fsutil - -import ( - "os" - "path/filepath" - "runtime" - "sort" - strings "strings" - - "github.com/pkg/errors" -) - -func FollowLinks(root string, paths []string) ([]string, error) { - r := &symlinkResolver{root: root, resolved: map[string]struct{}{}} - for _, p := range paths { - if err := r.append(p); err != nil { - return nil, err - } - } - res := make([]string, 0, len(r.resolved)) - for r := range r.resolved { - res = append(res, r) - } - sort.Strings(res) - return dedupePaths(res), nil -} - -type symlinkResolver struct { - root string - resolved map[string]struct{} -} - -func (r *symlinkResolver) append(p string) error { - p = filepath.Join(".", p) - current := "." 
- for { - parts := strings.SplitN(p, string(filepath.Separator), 2) - current = filepath.Join(current, parts[0]) - - targets, err := r.readSymlink(current, true) - if err != nil { - return err - } - - p = "" - if len(parts) == 2 { - p = parts[1] - } - - if p == "" || targets != nil { - if _, ok := r.resolved[current]; ok { - return nil - } - } - - if targets != nil { - r.resolved[current] = struct{}{} - for _, target := range targets { - if err := r.append(filepath.Join(target, p)); err != nil { - return err - } - } - return nil - } - - if p == "" { - r.resolved[current] = struct{}{} - return nil - } - } -} - -func (r *symlinkResolver) readSymlink(p string, allowWildcard bool) ([]string, error) { - realPath := filepath.Join(r.root, p) - base := filepath.Base(p) - if allowWildcard && containsWildcards(base) { - fis, err := os.ReadDir(filepath.Dir(realPath)) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - return nil, nil - } - return nil, errors.Wrap(err, "readdir") - } - var out []string - for _, f := range fis { - if ok, _ := filepath.Match(base, f.Name()); ok { - res, err := r.readSymlink(filepath.Join(filepath.Dir(p), f.Name()), false) - if err != nil { - return nil, err - } - out = append(out, res...) - } - } - return out, nil - } - - fi, err := os.Lstat(realPath) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - return nil, nil - } - return nil, errors.WithStack(err) - } - if fi.Mode()&os.ModeSymlink == 0 { - return nil, nil - } - link, err := os.Readlink(realPath) - if err != nil { - return nil, errors.WithStack(err) - } - link = filepath.Clean(link) - if filepath.IsAbs(link) { - return []string{link}, nil - } - return []string{ - filepath.Join(string(filepath.Separator), filepath.Join(filepath.Dir(p), link)), - }, nil -} - -func containsWildcards(name string) bool { - isWindows := runtime.GOOS == "windows" - for i := 0; i < len(name); i++ { - ch := name[i] - if ch == '\\' && !isWindows { - i++ - } else if ch == '*' || ch == '?' || ch == '[' { - return true - } - } - return false -} - -// dedupePaths expects input as a sorted list -func dedupePaths(in []string) []string { - out := make([]string, 0, len(in)) - var last string - for _, s := range in { - // if one of the paths is root there is no filter - if s == "." 
{ - return nil - } - if strings.HasPrefix(s, last+string(filepath.Separator)) { - continue - } - out = append(out, s) - last = s - } - return out -} diff --git a/vendor/github.com/tonistiigi/fsutil/fs.go b/vendor/github.com/tonistiigi/fsutil/fs.go deleted file mode 100644 index db587b77cd..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/fs.go +++ /dev/null @@ -1,119 +0,0 @@ -package fsutil - -import ( - "context" - "io" - "os" - "path" - "path/filepath" - "sort" - "strings" - "syscall" - - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -type FS interface { - Walk(context.Context, filepath.WalkFunc) error - Open(string) (io.ReadCloser, error) -} - -func NewFS(root string, opt *WalkOpt) FS { - return &fs{ - root: root, - opt: opt, - } -} - -type fs struct { - root string - opt *WalkOpt -} - -func (fs *fs) Walk(ctx context.Context, fn filepath.WalkFunc) error { - return Walk(ctx, fs.root, fs.opt, fn) -} - -func (fs *fs) Open(p string) (io.ReadCloser, error) { - rc, err := os.Open(filepath.Join(fs.root, p)) - return rc, errors.WithStack(err) -} - -type Dir struct { - Stat types.Stat - FS FS -} - -func SubDirFS(dirs []Dir) (FS, error) { - sort.Slice(dirs, func(i, j int) bool { - return dirs[i].Stat.Path < dirs[j].Stat.Path - }) - m := map[string]Dir{} - for _, d := range dirs { - if path.Base(d.Stat.Path) != d.Stat.Path { - return nil, errors.WithStack(&os.PathError{Path: d.Stat.Path, Err: syscall.EISDIR, Op: "invalid path"}) - } - if _, ok := m[d.Stat.Path]; ok { - return nil, errors.WithStack(&os.PathError{Path: d.Stat.Path, Err: syscall.EEXIST, Op: "duplicate path"}) - } - m[d.Stat.Path] = d - } - return &subDirFS{m: m, dirs: dirs}, nil -} - -type subDirFS struct { - m map[string]Dir - dirs []Dir -} - -func (fs *subDirFS) Walk(ctx context.Context, fn filepath.WalkFunc) error { - for _, d := range fs.dirs { - fi := &StatInfo{Stat: &d.Stat} - if !fi.IsDir() { - return errors.WithStack(&os.PathError{Path: d.Stat.Path, Err: syscall.ENOTDIR, Op: "walk subdir"}) - } - if err := fn(d.Stat.Path, fi, nil); err != nil { - return err - } - if err := d.FS.Walk(ctx, func(p string, fi os.FileInfo, err error) error { - stat, ok := fi.Sys().(*types.Stat) - if !ok { - return errors.WithStack(&os.PathError{Path: d.Stat.Path, Err: syscall.EBADMSG, Op: "fileinfo without stat info"}) - } - stat.Path = path.Join(d.Stat.Path, stat.Path) - if stat.Linkname != "" { - if fi.Mode()&os.ModeSymlink != 0 { - if strings.HasPrefix(stat.Linkname, "/") { - stat.Linkname = path.Join("/"+d.Stat.Path, stat.Linkname) - } - } else { - stat.Linkname = path.Join(d.Stat.Path, stat.Linkname) - } - } - return fn(filepath.Join(d.Stat.Path, p), &StatInfo{stat}, nil) - }); err != nil { - return err - } - } - return nil -} - -func (fs *subDirFS) Open(p string) (io.ReadCloser, error) { - parts := strings.SplitN(filepath.Clean(p), string(filepath.Separator), 2) - if len(parts) == 0 { - return io.NopCloser(&emptyReader{}), nil - } - d, ok := fs.m[parts[0]] - if !ok { - return nil, errors.WithStack(&os.PathError{Path: parts[0], Err: syscall.ENOENT, Op: "open"}) - } - return d.FS.Open(parts[1]) -} - -type emptyReader struct { -} - -func (*emptyReader) Read([]byte) (int, error) { - return 0, io.EOF -} diff --git a/vendor/github.com/tonistiigi/fsutil/hardlinks.go b/vendor/github.com/tonistiigi/fsutil/hardlinks.go deleted file mode 100644 index ef8bbfb5da..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/hardlinks.go +++ /dev/null @@ -1,48 +0,0 @@ -package fsutil - -import ( - "os" - "syscall" - - 
"github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -// Hardlinks validates that all targets for links were part of the changes - -type Hardlinks struct { - seenFiles map[string]struct{} -} - -func (v *Hardlinks) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - if v.seenFiles == nil { - v.seenFiles = make(map[string]struct{}) - } - - if kind == ChangeKindDelete { - return nil - } - - stat, ok := fi.Sys().(*types.Stat) - if !ok { - return errors.WithStack(&os.PathError{Path: p, Err: syscall.EBADMSG, Op: "change without stat info"}) - } - - if fi.IsDir() || fi.Mode()&os.ModeSymlink != 0 { - return nil - } - - if len(stat.Linkname) > 0 { - if _, ok := v.seenFiles[stat.Linkname]; !ok { - return errors.Errorf("invalid link %s to unknown path: %q", p, stat.Linkname) - } - } else { - v.seenFiles[p] = struct{}{} - } - - return nil -} diff --git a/vendor/github.com/tonistiigi/fsutil/readme.md b/vendor/github.com/tonistiigi/fsutil/readme.md deleted file mode 100644 index 5ce685b7ed..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/readme.md +++ /dev/null @@ -1,45 +0,0 @@ -Incremental file directory sync tools in golang. - -``` -BENCH_FILE_SIZE=10000 ./bench.test --test.bench . -BenchmarkCopyWithTar10-4 2000 995242 ns/op -BenchmarkCopyWithTar50-4 300 4710021 ns/op -BenchmarkCopyWithTar200-4 100 16627260 ns/op -BenchmarkCopyWithTar1000-4 20 60031459 ns/op -BenchmarkCPA10-4 1000 1678367 ns/op -BenchmarkCPA50-4 500 3690306 ns/op -BenchmarkCPA200-4 200 9495066 ns/op -BenchmarkCPA1000-4 50 29769289 ns/op -BenchmarkDiffCopy10-4 2000 943889 ns/op -BenchmarkDiffCopy50-4 500 3285950 ns/op -BenchmarkDiffCopy200-4 200 8563792 ns/op -BenchmarkDiffCopy1000-4 50 29511340 ns/op -BenchmarkDiffCopyProto10-4 2000 944615 ns/op -BenchmarkDiffCopyProto50-4 500 3334940 ns/op -BenchmarkDiffCopyProto200-4 200 9420038 ns/op -BenchmarkDiffCopyProto1000-4 50 30632429 ns/op -BenchmarkIncrementalDiffCopy10-4 2000 691993 ns/op -BenchmarkIncrementalDiffCopy50-4 1000 1304253 ns/op -BenchmarkIncrementalDiffCopy200-4 500 3306519 ns/op -BenchmarkIncrementalDiffCopy1000-4 200 10211343 ns/op -BenchmarkIncrementalDiffCopy5000-4 20 55194427 ns/op -BenchmarkIncrementalDiffCopy10000-4 20 91759289 ns/op -BenchmarkIncrementalCopyWithTar10-4 2000 1020258 ns/op -BenchmarkIncrementalCopyWithTar50-4 300 5348786 ns/op -BenchmarkIncrementalCopyWithTar200-4 100 19495000 ns/op -BenchmarkIncrementalCopyWithTar1000-4 20 70338507 ns/op -BenchmarkIncrementalRsync10-4 30 45215754 ns/op -BenchmarkIncrementalRsync50-4 30 45837260 ns/op -BenchmarkIncrementalRsync200-4 30 48780614 ns/op -BenchmarkIncrementalRsync1000-4 20 54801892 ns/op -BenchmarkIncrementalRsync5000-4 20 84782542 ns/op -BenchmarkIncrementalRsync10000-4 10 103355108 ns/op -BenchmarkRsync10-4 30 46776470 ns/op -BenchmarkRsync50-4 30 48601555 ns/op -BenchmarkRsync200-4 20 59642691 ns/op -BenchmarkRsync1000-4 20 101343010 ns/op -BenchmarkGnuTar10-4 500 3171448 ns/op -BenchmarkGnuTar50-4 300 5030296 ns/op -BenchmarkGnuTar200-4 100 10464313 ns/op -BenchmarkGnuTar1000-4 50 30375257 ns/op -``` \ No newline at end of file diff --git a/vendor/github.com/tonistiigi/fsutil/receive.go b/vendor/github.com/tonistiigi/fsutil/receive.go deleted file mode 100644 index 209d1d2faf..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/receive.go +++ /dev/null @@ -1,286 +0,0 @@ -package fsutil - -import ( - "context" - "io" - "os" - "sync" - - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" - 
"golang.org/x/sync/errgroup" -) - -type DiffType int - -const ( - DiffMetadata DiffType = iota - DiffNone - DiffContent -) - -type ReceiveOpt struct { - NotifyHashed ChangeFunc - ContentHasher ContentHasher - ProgressCb func(int, bool) - Merge bool - Filter FilterFunc - Differ DiffType -} - -func Receive(ctx context.Context, conn Stream, dest string, opt ReceiveOpt) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - r := &receiver{ - conn: &syncStream{Stream: conn}, - dest: dest, - files: make(map[string]uint32), - pipes: make(map[uint32]io.WriteCloser), - notifyHashed: opt.NotifyHashed, - contentHasher: opt.ContentHasher, - progressCb: opt.ProgressCb, - merge: opt.Merge, - filter: opt.Filter, - differ: opt.Differ, - } - return r.run(ctx) -} - -type receiver struct { - dest string - conn Stream - files map[string]uint32 - pipes map[uint32]io.WriteCloser - mu sync.RWMutex - muPipes sync.RWMutex - progressCb func(int, bool) - merge bool - filter FilterFunc - differ DiffType - - notifyHashed ChangeFunc - contentHasher ContentHasher - orderValidator Validator - hlValidator Hardlinks -} - -type dynamicWalker struct { - walkChan chan *currentPath - err error - closeCh chan struct{} -} - -func newDynamicWalker() *dynamicWalker { - return &dynamicWalker{ - walkChan: make(chan *currentPath, 128), - closeCh: make(chan struct{}), - } -} - -func (w *dynamicWalker) update(p *currentPath) error { - select { - case <-w.closeCh: - return errors.Wrap(w.err, "walker is closed") - default: - } - if p == nil { - close(w.walkChan) - return nil - } - select { - case w.walkChan <- p: - return nil - case <-w.closeCh: - return errors.Wrap(w.err, "walker is closed") - } -} - -func (w *dynamicWalker) fill(ctx context.Context, pathC chan<- *currentPath) error { - for { - select { - case p, ok := <-w.walkChan: - if !ok { - return nil - } - select { - case pathC <- p: - case <-ctx.Done(): - w.err = ctx.Err() - close(w.closeCh) - return ctx.Err() - } - case <-ctx.Done(): - w.err = ctx.Err() - close(w.closeCh) - return ctx.Err() - } - } -} - -func (r *receiver) run(ctx context.Context) error { - g, ctx := errgroup.WithContext(ctx) - - dw, err := NewDiskWriter(ctx, r.dest, DiskWriterOpt{ - AsyncDataCb: r.asyncDataFunc, - NotifyCb: r.notifyHashed, - ContentHasher: r.contentHasher, - Filter: r.filter, - }) - if err != nil { - return err - } - - w := newDynamicWalker() - - g.Go(func() (retErr error) { - defer func() { - if retErr != nil { - r.conn.SendMsg(&types.Packet{Type: types.PACKET_ERR, Data: []byte(retErr.Error())}) - } - }() - destWalker := emptyWalker - if !r.merge { - destWalker = getWalkerFn(r.dest) - } - err := doubleWalkDiff(ctx, dw.HandleChange, destWalker, w.fill, r.filter, r.differ) - if err != nil { - return err - } - if err := dw.Wait(ctx); err != nil { - return err - } - r.conn.SendMsg(&types.Packet{Type: types.PACKET_FIN}) - return nil - }) - - g.Go(func() error { - var i uint32 = 0 - - size := 0 - if r.progressCb != nil { - defer func() { - r.progressCb(size, true) - }() - } - var p types.Packet - for { - p = types.Packet{Data: p.Data[:0]} - if err := r.conn.RecvMsg(&p); err != nil { - return err - } - if r.progressCb != nil { - size += p.Size() - r.progressCb(size, false) - } - - switch p.Type { - case types.PACKET_ERR: - return errors.Errorf("error from sender: %s", p.Data) - case types.PACKET_STAT: - if p.Stat == nil { - if err := w.update(nil); err != nil { - return err - } - break - } - if fileCanRequestData(os.FileMode(p.Stat.Mode)) { - r.mu.Lock() - r.files[p.Stat.Path] = i - 
r.mu.Unlock() - } - i++ - cp := &currentPath{path: p.Stat.Path, stat: p.Stat} - if err := r.orderValidator.HandleChange(ChangeKindAdd, cp.path, &StatInfo{cp.stat}, nil); err != nil { - return err - } - if err := r.hlValidator.HandleChange(ChangeKindAdd, cp.path, &StatInfo{cp.stat}, nil); err != nil { - return err - } - if err := w.update(cp); err != nil { - return err - } - case types.PACKET_DATA: - r.muPipes.Lock() - pw, ok := r.pipes[p.ID] - r.muPipes.Unlock() - if !ok { - return errors.Errorf("invalid file request %d", p.ID) - } - if len(p.Data) == 0 { - if err := pw.Close(); err != nil { - return err - } - } else { - if _, err := pw.Write(p.Data); err != nil { - return err - } - } - case types.PACKET_FIN: - for { - var p types.Packet - if err := r.conn.RecvMsg(&p); err != nil { - if err == io.EOF { - return nil - } - return err - } - } - } - } - }) - return g.Wait() -} - -func (r *receiver) asyncDataFunc(ctx context.Context, p string, wc io.WriteCloser) error { - r.mu.Lock() - id, ok := r.files[p] - if !ok { - r.mu.Unlock() - return errors.Errorf("invalid file request %s", p) - } - delete(r.files, p) - r.mu.Unlock() - - wwc := newWrappedWriteCloser(wc) - r.muPipes.Lock() - r.pipes[id] = wwc - r.muPipes.Unlock() - if err := r.conn.SendMsg(&types.Packet{Type: types.PACKET_REQ, ID: id}); err != nil { - return err - } - err := wwc.Wait(ctx) - if err != nil { - return err - } - r.muPipes.Lock() - delete(r.pipes, id) - r.muPipes.Unlock() - return nil -} - -type wrappedWriteCloser struct { - io.WriteCloser - err error - once sync.Once - done chan struct{} -} - -func newWrappedWriteCloser(wc io.WriteCloser) *wrappedWriteCloser { - return &wrappedWriteCloser{WriteCloser: wc, done: make(chan struct{})} -} - -func (w *wrappedWriteCloser) Close() error { - w.err = w.WriteCloser.Close() - w.once.Do(func() { close(w.done) }) - return w.err -} - -func (w *wrappedWriteCloser) Wait(ctx context.Context) error { - select { - case <-ctx.Done(): - return ctx.Err() - case <-w.done: - return w.err - } -} diff --git a/vendor/github.com/tonistiigi/fsutil/send.go b/vendor/github.com/tonistiigi/fsutil/send.go deleted file mode 100644 index f1c51b8365..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/send.go +++ /dev/null @@ -1,208 +0,0 @@ -package fsutil - -import ( - "context" - "io" - "os" - "sync" - "syscall" - - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" - "golang.org/x/sync/errgroup" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - buf := make([]byte, 32*1<<10) - return &buf - }, -} - -type Stream interface { - RecvMsg(interface{}) error - SendMsg(m interface{}) error - Context() context.Context -} - -func Send(ctx context.Context, conn Stream, fs FS, progressCb func(int, bool)) error { - s := &sender{ - conn: &syncStream{Stream: conn}, - fs: fs, - files: make(map[uint32]string), - progressCb: progressCb, - sendpipeline: make(chan *sendHandle, 128), - } - return s.run(ctx) -} - -type sendHandle struct { - id uint32 - path string -} - -type sender struct { - conn Stream - fs FS - files map[uint32]string - mu sync.RWMutex - progressCb func(int, bool) - progressCurrent int - sendpipeline chan *sendHandle -} - -func (s *sender) run(ctx context.Context) error { - g, ctx := errgroup.WithContext(ctx) - - defer s.updateProgress(0, true) - - g.Go(func() error { - err := s.walk(ctx) - if err != nil { - s.conn.SendMsg(&types.Packet{Type: types.PACKET_ERR, Data: []byte(err.Error())}) - } - return err - }) - - for i := 0; i < 4; i++ { - g.Go(func() error { - for h := range s.sendpipeline { 
- select { - case <-ctx.Done(): - return ctx.Err() - default: - } - if err := s.sendFile(h); err != nil { - return err - } - } - return nil - }) - } - - g.Go(func() error { - defer close(s.sendpipeline) - - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - var p types.Packet - if err := s.conn.RecvMsg(&p); err != nil { - return err - } - switch p.Type { - case types.PACKET_ERR: - return errors.Errorf("error from receiver: %s", p.Data) - case types.PACKET_REQ: - if err := s.queue(p.ID); err != nil { - return err - } - case types.PACKET_FIN: - return s.conn.SendMsg(&types.Packet{Type: types.PACKET_FIN}) - } - } - }) - - return g.Wait() -} - -func (s *sender) updateProgress(size int, last bool) { - if s.progressCb != nil { - s.progressCurrent += size - s.progressCb(s.progressCurrent, last) - } -} - -func (s *sender) queue(id uint32) error { - s.mu.Lock() - p, ok := s.files[id] - if !ok { - s.mu.Unlock() - return errors.Errorf("invalid file id %d", id) - } - delete(s.files, id) - s.mu.Unlock() - s.sendpipeline <- &sendHandle{id, p} - return nil -} - -func (s *sender) sendFile(h *sendHandle) error { - f, err := s.fs.Open(h.path) - if err == nil { - defer f.Close() - buf := bufPool.Get().(*[]byte) - defer bufPool.Put(buf) - if _, err := io.CopyBuffer(&fileSender{sender: s, id: h.id}, struct{ io.Reader }{f}, *buf); err != nil { - return err - } - } - return s.conn.SendMsg(&types.Packet{ID: h.id, Type: types.PACKET_DATA}) -} - -func (s *sender) walk(ctx context.Context) error { - var i uint32 = 0 - err := s.fs.Walk(ctx, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - stat, ok := fi.Sys().(*types.Stat) - if !ok { - return errors.WithStack(&os.PathError{Path: path, Err: syscall.EBADMSG, Op: "fileinfo without stat info"}) - } - - p := &types.Packet{ - Type: types.PACKET_STAT, - Stat: stat, - } - if fileCanRequestData(os.FileMode(stat.Mode)) { - s.mu.Lock() - s.files[i] = stat.Path - s.mu.Unlock() - } - i++ - s.updateProgress(p.Size(), false) - return errors.Wrapf(s.conn.SendMsg(p), "failed to send stat %s", path) - }) - if err != nil { - return err - } - return errors.Wrapf(s.conn.SendMsg(&types.Packet{Type: types.PACKET_STAT}), "failed to send last stat") -} - -func fileCanRequestData(m os.FileMode) bool { - // avoid updating this function as it needs to match between sender/receiver. - // version if needed - return m&os.ModeType == 0 -} - -type fileSender struct { - sender *sender - id uint32 -} - -func (fs *fileSender) Write(dt []byte) (int, error) { - if len(dt) == 0 { - return 0, nil - } - p := &types.Packet{Type: types.PACKET_DATA, ID: fs.id, Data: dt} - if err := fs.sender.conn.SendMsg(p); err != nil { - return 0, err - } - fs.sender.updateProgress(p.Size(), false) - return len(dt), nil -} - -type syncStream struct { - Stream - mu sync.Mutex -} - -func (ss *syncStream) SendMsg(m interface{}) error { - ss.mu.Lock() - err := ss.Stream.SendMsg(m) - ss.mu.Unlock() - return err -} diff --git a/vendor/github.com/tonistiigi/fsutil/stat.go b/vendor/github.com/tonistiigi/fsutil/stat.go deleted file mode 100644 index 2ab8da118e..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/stat.go +++ /dev/null @@ -1,64 +0,0 @@ -package fsutil - -import ( - "os" - "path/filepath" - "runtime" - - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -// constructs a Stat object. path is where the path can be found right -// now, relpath is the desired path to be recorded in the stat (so -// relative to whatever base dir is relevant). 
fi is the os.Stat -// info. inodemap is used to calculate hardlinks over a series of -// mkstat calls and maps inode to the canonical (aka "first") path for -// a set of hardlinks to that inode. -func mkstat(path, relpath string, fi os.FileInfo, inodemap map[uint64]string) (*types.Stat, error) { - relpath = filepath.ToSlash(relpath) - - stat := &types.Stat{ - Path: relpath, - Mode: uint32(fi.Mode()), - ModTime: fi.ModTime().UnixNano(), - } - - setUnixOpt(fi, stat, relpath, inodemap) - - if !fi.IsDir() { - stat.Size_ = fi.Size() - if fi.Mode()&os.ModeSymlink != 0 { - link, err := os.Readlink(path) - if err != nil { - return nil, errors.WithStack(err) - } - stat.Linkname = link - } - } - if err := loadXattr(path, stat); err != nil { - return nil, err - } - - if runtime.GOOS == "windows" { - permPart := stat.Mode & uint32(os.ModePerm) - noPermPart := stat.Mode &^ uint32(os.ModePerm) - // Add the x bit: make everything +x from windows - permPart |= 0111 - permPart &= 0755 - stat.Mode = noPermPart | permPart - } - - // Clear the socket bit since archive/tar.FileInfoHeader does not handle it - stat.Mode &^= uint32(os.ModeSocket) - - return stat, nil -} - -func Stat(path string) (*types.Stat, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, errors.WithStack(err) - } - return mkstat(path, filepath.Base(path), fi, nil) -} diff --git a/vendor/github.com/tonistiigi/fsutil/stat_unix.go b/vendor/github.com/tonistiigi/fsutil/stat_unix.go deleted file mode 100644 index 5923aefef1..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/stat_unix.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build !windows -// +build !windows - -package fsutil - -import ( - "os" - "syscall" - - "github.com/containerd/continuity/sysx" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -func loadXattr(origpath string, stat *types.Stat) error { - xattrs, err := sysx.LListxattr(origpath) - if err != nil { - if errors.Is(err, syscall.ENOTSUP) { - return nil - } - return errors.Wrapf(err, "failed to xattr %s", origpath) - } - if len(xattrs) > 0 { - m := make(map[string][]byte) - for _, key := range xattrs { - v, err := sysx.LGetxattr(origpath, key) - if err == nil { - m[key] = v - } - } - stat.Xattrs = m - } - return nil -} - -func setUnixOpt(fi os.FileInfo, stat *types.Stat, path string, seenFiles map[uint64]string) { - s := fi.Sys().(*syscall.Stat_t) - - stat.Uid = s.Uid - stat.Gid = s.Gid - - if !fi.IsDir() { - if s.Mode&syscall.S_IFBLK != 0 || - s.Mode&syscall.S_IFCHR != 0 { - stat.Devmajor = int64(major(uint64(s.Rdev))) - stat.Devminor = int64(minor(uint64(s.Rdev))) - } - - ino := s.Ino - linked := false - if seenFiles != nil { - if s.Nlink > 1 { - if oldpath, ok := seenFiles[ino]; ok { - stat.Linkname = oldpath - stat.Size_ = 0 - linked = true - } - } - if !linked { - seenFiles[ino] = path - } - } - } -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} diff --git a/vendor/github.com/tonistiigi/fsutil/stat_windows.go b/vendor/github.com/tonistiigi/fsutil/stat_windows.go deleted file mode 100644 index 66379bd84c..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/stat_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build windows - -package fsutil - -import ( - "os" - - "github.com/tonistiigi/fsutil/types" -) - -func loadXattr(_ string, _ *types.Stat) error { - return nil -} - -func setUnixOpt(_ os.FileInfo, _ *types.Stat, _ string, _ map[uint64]string) { -} diff --git 
a/vendor/github.com/tonistiigi/fsutil/tarwriter.go b/vendor/github.com/tonistiigi/fsutil/tarwriter.go deleted file mode 100644 index bd46a2250f..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/tarwriter.go +++ /dev/null @@ -1,80 +0,0 @@ -package fsutil - -import ( - "archive/tar" - "context" - "io" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -func WriteTar(ctx context.Context, fs FS, w io.Writer) error { - tw := tar.NewWriter(w) - err := fs.Walk(ctx, func(path string, fi os.FileInfo, err error) error { - if err != nil && !errors.Is(err, os.ErrNotExist) { - return err - } - stat, ok := fi.Sys().(*types.Stat) - if !ok { - return errors.WithStack(&os.PathError{Path: path, Err: syscall.EBADMSG, Op: "fileinfo without stat info"}) - } - hdr, err := tar.FileInfoHeader(fi, stat.Linkname) - if err != nil { - return err - } - - name := filepath.ToSlash(path) - if fi.IsDir() && !strings.HasSuffix(name, "/") { - name += "/" - } - hdr.Name = name - - hdr.Uid = int(stat.Uid) - hdr.Gid = int(stat.Gid) - hdr.Devmajor = stat.Devmajor - hdr.Devminor = stat.Devminor - hdr.Linkname = stat.Linkname - if hdr.Linkname != "" { - hdr.Size = 0 - if fi.Mode()&os.ModeSymlink != 0 { - hdr.Typeflag = tar.TypeSymlink - } else { - hdr.Typeflag = tar.TypeLink - } - } - - if len(stat.Xattrs) > 0 { - hdr.PAXRecords = map[string]string{} - } - for k, v := range stat.Xattrs { - hdr.PAXRecords["SCHILY.xattr."+k] = string(v) - } - - if err := tw.WriteHeader(hdr); err != nil { - return errors.Wrapf(err, "failed to write file header %s", name) - } - - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 && hdr.Linkname == "" { - rc, err := fs.Open(path) - if err != nil { - return err - } - if _, err := io.Copy(tw, rc); err != nil { - return errors.WithStack(err) - } - if err := rc.Close(); err != nil { - return errors.WithStack(err) - } - } - return nil - }) - if err != nil { - return err - } - return tw.Close() -} diff --git a/vendor/github.com/tonistiigi/fsutil/types/generate.go b/vendor/github.com/tonistiigi/fsutil/types/generate.go deleted file mode 100644 index 5c03178f36..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/types/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package types - -//go:generate protoc --gogoslick_out=. stat.proto wire.proto diff --git a/vendor/github.com/tonistiigi/fsutil/types/stat.go b/vendor/github.com/tonistiigi/fsutil/types/stat.go deleted file mode 100644 index b79fd2bd76..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/types/stat.go +++ /dev/null @@ -1,7 +0,0 @@ -package types - -import "os" - -func (s Stat) IsDir() bool { - return os.FileMode(s.Mode).IsDir() -} diff --git a/vendor/github.com/tonistiigi/fsutil/types/stat.pb.go b/vendor/github.com/tonistiigi/fsutil/types/stat.pb.go deleted file mode 100644 index 91200fb779..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/types/stat.pb.go +++ /dev/null @@ -1,929 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: stat.proto - -package types - -import ( - bytes "bytes" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Stat struct { - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Mode uint32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` - Uid uint32 `protobuf:"varint,3,opt,name=uid,proto3" json:"uid,omitempty"` - Gid uint32 `protobuf:"varint,4,opt,name=gid,proto3" json:"gid,omitempty"` - Size_ int64 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"` - ModTime int64 `protobuf:"varint,6,opt,name=modTime,proto3" json:"modTime,omitempty"` - // int32 typeflag = 7; - Linkname string `protobuf:"bytes,7,opt,name=linkname,proto3" json:"linkname,omitempty"` - Devmajor int64 `protobuf:"varint,8,opt,name=devmajor,proto3" json:"devmajor,omitempty"` - Devminor int64 `protobuf:"varint,9,opt,name=devminor,proto3" json:"devminor,omitempty"` - Xattrs map[string][]byte `protobuf:"bytes,10,rep,name=xattrs,proto3" json:"xattrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *Stat) Reset() { *m = Stat{} } -func (*Stat) ProtoMessage() {} -func (*Stat) Descriptor() ([]byte, []int) { - return fileDescriptor_01fabdc1b78bd68b, []int{0} -} -func (m *Stat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Stat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Stat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Stat) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stat.Merge(m, src) -} -func (m *Stat) XXX_Size() int { - return m.Size() -} -func (m *Stat) XXX_DiscardUnknown() { - xxx_messageInfo_Stat.DiscardUnknown(m) -} - -var xxx_messageInfo_Stat proto.InternalMessageInfo - -func (m *Stat) GetPath() string { - if m != nil { - return m.Path - } - return "" -} - -func (m *Stat) GetMode() uint32 { - if m != nil { - return m.Mode - } - return 0 -} - -func (m *Stat) GetUid() uint32 { - if m != nil { - return m.Uid - } - return 0 -} - -func (m *Stat) GetGid() uint32 { - if m != nil { - return m.Gid - } - return 0 -} - -func (m *Stat) GetSize_() int64 { - if m != nil { - return m.Size_ - } - return 0 -} - -func (m *Stat) GetModTime() int64 { - if m != nil { - return m.ModTime - } - return 0 -} - -func (m *Stat) GetLinkname() string { - if m != nil { - return m.Linkname - } - return "" -} - -func (m *Stat) GetDevmajor() int64 { - if m != nil { - return m.Devmajor - } - return 0 -} - -func (m *Stat) GetDevminor() int64 { - if m != nil { - return m.Devminor - } - return 0 -} - -func (m *Stat) GetXattrs() map[string][]byte { - if m != nil { - return m.Xattrs - } - return nil -} - -func init() { - proto.RegisterType((*Stat)(nil), "fsutil.types.Stat") - proto.RegisterMapType((map[string][]byte)(nil), "fsutil.types.Stat.XattrsEntry") -} - -func init() { proto.RegisterFile("stat.proto", fileDescriptor_01fabdc1b78bd68b) } - -var fileDescriptor_01fabdc1b78bd68b = []byte{ - // 317 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x91, 0xbd, 0x4e, 0xc3, 0x30, - 0x14, 0x85, 
0x73, 0x9b, 0xfe, 0xba, 0x45, 0x42, 0x16, 0x83, 0xd5, 0xe1, 0x2a, 0x62, 0xca, 0x94, - 0x01, 0x24, 0xc4, 0xcf, 0x86, 0xc4, 0x0b, 0x04, 0x06, 0xc4, 0x66, 0x64, 0x53, 0x4c, 0x9b, 0xb8, - 0x4a, 0xdc, 0x8a, 0x32, 0xf1, 0x08, 0x3c, 0x06, 0x6f, 0x02, 0x63, 0xc7, 0x8e, 0xd4, 0x5d, 0x18, - 0xfb, 0x08, 0xc8, 0x4e, 0x5b, 0xba, 0x9d, 0xf3, 0x9d, 0x7b, 0x95, 0x9c, 0x6b, 0x42, 0x4a, 0xc3, - 0x4d, 0x32, 0x2e, 0xb4, 0xd1, 0xb4, 0xf7, 0x54, 0x4e, 0x8c, 0x1a, 0x25, 0x66, 0x36, 0x96, 0xe5, - 0xf1, 0x57, 0x8d, 0xd4, 0x6f, 0x0d, 0x37, 0x94, 0x92, 0xfa, 0x98, 0x9b, 0x67, 0x06, 0x11, 0xc4, - 0x9d, 0xd4, 0x6b, 0xc7, 0x32, 0x2d, 0x24, 0xab, 0x45, 0x10, 0x1f, 0xa4, 0x5e, 0xd3, 0x43, 0x12, - 0x4e, 0x94, 0x60, 0xa1, 0x47, 0x4e, 0x3a, 0x32, 0x50, 0x82, 0xd5, 0x2b, 0x32, 0x50, 0xc2, 0xed, - 0x95, 0xea, 0x4d, 0xb2, 0x46, 0x04, 0x71, 0x98, 0x7a, 0x4d, 0x19, 0x69, 0x65, 0x5a, 0xdc, 0xa9, - 0x4c, 0xb2, 0xa6, 0xc7, 0x5b, 0x4b, 0xfb, 0xa4, 0x3d, 0x52, 0xf9, 0x30, 0xe7, 0x99, 0x64, 0x2d, - 0xff, 0xf5, 0x9d, 0x77, 0x99, 0x90, 0xd3, 0x8c, 0xbf, 0xe8, 0x82, 0xb5, 0xfd, 0xda, 0xce, 0x6f, - 0x33, 0x95, 0xeb, 0x82, 0x75, 0xfe, 0x33, 0xe7, 0xe9, 0x19, 0x69, 0xbe, 0x72, 0x63, 0x8a, 0x92, - 0x91, 0x28, 0x8c, 0xbb, 0x27, 0x98, 0xec, 0xb7, 0x4e, 0x5c, 0xe3, 0xe4, 0xde, 0x0f, 0xdc, 0xe4, - 0xa6, 0x98, 0xa5, 0x9b, 0xe9, 0xfe, 0x05, 0xe9, 0xee, 0x61, 0x57, 0x6d, 0x28, 0x67, 0x9b, 0x9b, - 0x38, 0x49, 0x8f, 0x48, 0x63, 0xca, 0x47, 0x93, 0xea, 0x26, 0xbd, 0xb4, 0x32, 0x97, 0xb5, 0x73, - 0xb8, 0xbe, 0x9a, 0x2f, 0x31, 0x58, 0x2c, 0x31, 0x58, 0x2f, 0x11, 0xde, 0x2d, 0xc2, 0xa7, 0x45, - 0xf8, 0xb6, 0x08, 0x73, 0x8b, 0xf0, 0x63, 0x11, 0x7e, 0x2d, 0x06, 0x6b, 0x8b, 0xf0, 0xb1, 0xc2, - 0x60, 0xbe, 0xc2, 0x60, 0xb1, 0xc2, 0xe0, 0xa1, 0xe1, 0x7f, 0xe8, 0xb1, 0xe9, 0xdf, 0xe6, 0xf4, - 0x2f, 0x00, 0x00, 0xff, 0xff, 0x06, 0x97, 0xf3, 0xd7, 0xa9, 0x01, 0x00, 0x00, -} - -func (this *Stat) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Stat) - if !ok { - that2, ok := that.(Stat) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Path != that1.Path { - return false - } - if this.Mode != that1.Mode { - return false - } - if this.Uid != that1.Uid { - return false - } - if this.Gid != that1.Gid { - return false - } - if this.Size_ != that1.Size_ { - return false - } - if this.ModTime != that1.ModTime { - return false - } - if this.Linkname != that1.Linkname { - return false - } - if this.Devmajor != that1.Devmajor { - return false - } - if this.Devminor != that1.Devminor { - return false - } - if len(this.Xattrs) != len(that1.Xattrs) { - return false - } - for i := range this.Xattrs { - if !bytes.Equal(this.Xattrs[i], that1.Xattrs[i]) { - return false - } - } - return true -} -func (this *Stat) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 14) - s = append(s, "&types.Stat{") - s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") - s = append(s, "Mode: "+fmt.Sprintf("%#v", this.Mode)+",\n") - s = append(s, "Uid: "+fmt.Sprintf("%#v", this.Uid)+",\n") - s = append(s, "Gid: "+fmt.Sprintf("%#v", this.Gid)+",\n") - s = append(s, "Size_: "+fmt.Sprintf("%#v", this.Size_)+",\n") - s = append(s, "ModTime: "+fmt.Sprintf("%#v", this.ModTime)+",\n") - s = append(s, "Linkname: "+fmt.Sprintf("%#v", this.Linkname)+",\n") - s = append(s, "Devmajor: "+fmt.Sprintf("%#v", this.Devmajor)+",\n") - s = append(s, "Devminor: "+fmt.Sprintf("%#v", this.Devminor)+",\n") - keysForXattrs := 
make([]string, 0, len(this.Xattrs)) - for k, _ := range this.Xattrs { - keysForXattrs = append(keysForXattrs, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForXattrs) - mapStringForXattrs := "map[string][]byte{" - for _, k := range keysForXattrs { - mapStringForXattrs += fmt.Sprintf("%#v: %#v,", k, this.Xattrs[k]) - } - mapStringForXattrs += "}" - if this.Xattrs != nil { - s = append(s, "Xattrs: "+mapStringForXattrs+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringStat(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *Stat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Stat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Stat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Xattrs) > 0 { - for k := range m.Xattrs { - v := m.Xattrs[k] - baseI := i - if len(v) > 0 { - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintStat(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintStat(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintStat(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x52 - } - } - if m.Devminor != 0 { - i = encodeVarintStat(dAtA, i, uint64(m.Devminor)) - i-- - dAtA[i] = 0x48 - } - if m.Devmajor != 0 { - i = encodeVarintStat(dAtA, i, uint64(m.Devmajor)) - i-- - dAtA[i] = 0x40 - } - if len(m.Linkname) > 0 { - i -= len(m.Linkname) - copy(dAtA[i:], m.Linkname) - i = encodeVarintStat(dAtA, i, uint64(len(m.Linkname))) - i-- - dAtA[i] = 0x3a - } - if m.ModTime != 0 { - i = encodeVarintStat(dAtA, i, uint64(m.ModTime)) - i-- - dAtA[i] = 0x30 - } - if m.Size_ != 0 { - i = encodeVarintStat(dAtA, i, uint64(m.Size_)) - i-- - dAtA[i] = 0x28 - } - if m.Gid != 0 { - i = encodeVarintStat(dAtA, i, uint64(m.Gid)) - i-- - dAtA[i] = 0x20 - } - if m.Uid != 0 { - i = encodeVarintStat(dAtA, i, uint64(m.Uid)) - i-- - dAtA[i] = 0x18 - } - if m.Mode != 0 { - i = encodeVarintStat(dAtA, i, uint64(m.Mode)) - i-- - dAtA[i] = 0x10 - } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintStat(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintStat(dAtA []byte, offset int, v uint64) int { - offset -= sovStat(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Stat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - if l > 0 { - n += 1 + l + sovStat(uint64(l)) - } - if m.Mode != 0 { - n += 1 + sovStat(uint64(m.Mode)) - } - if m.Uid != 0 { - n += 1 + sovStat(uint64(m.Uid)) - } - if m.Gid != 0 { - n += 1 + sovStat(uint64(m.Gid)) - } - if m.Size_ != 0 { - n += 1 + sovStat(uint64(m.Size_)) - } - if m.ModTime != 0 { - n += 1 + sovStat(uint64(m.ModTime)) - } - l = len(m.Linkname) - if l > 0 { - n += 1 + l + sovStat(uint64(l)) - } - if m.Devmajor != 0 { - n += 1 + sovStat(uint64(m.Devmajor)) - } - if m.Devminor != 0 { - n += 1 + sovStat(uint64(m.Devminor)) - } - if len(m.Xattrs) > 0 { - for k, v := range m.Xattrs { - _ = k - _ = v 
- l = 0 - if len(v) > 0 { - l = 1 + len(v) + sovStat(uint64(len(v))) - } - mapEntrySize := 1 + len(k) + sovStat(uint64(len(k))) + l - n += mapEntrySize + 1 + sovStat(uint64(mapEntrySize)) - } - } - return n -} - -func sovStat(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozStat(x uint64) (n int) { - return sovStat(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Stat) String() string { - if this == nil { - return "nil" - } - keysForXattrs := make([]string, 0, len(this.Xattrs)) - for k, _ := range this.Xattrs { - keysForXattrs = append(keysForXattrs, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForXattrs) - mapStringForXattrs := "map[string][]byte{" - for _, k := range keysForXattrs { - mapStringForXattrs += fmt.Sprintf("%v: %v,", k, this.Xattrs[k]) - } - mapStringForXattrs += "}" - s := strings.Join([]string{`&Stat{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, - `Uid:` + fmt.Sprintf("%v", this.Uid) + `,`, - `Gid:` + fmt.Sprintf("%v", this.Gid) + `,`, - `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, - `ModTime:` + fmt.Sprintf("%v", this.ModTime) + `,`, - `Linkname:` + fmt.Sprintf("%v", this.Linkname) + `,`, - `Devmajor:` + fmt.Sprintf("%v", this.Devmajor) + `,`, - `Devminor:` + fmt.Sprintf("%v", this.Devminor) + `,`, - `Xattrs:` + mapStringForXattrs + `,`, - `}`, - }, "") - return s -} -func valueToStringStat(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Stat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Stat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Stat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStat - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStat - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - m.Mode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Mode |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) - } - m.Uid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - m.Uid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType) - } - m.Gid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Gid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) - } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ModTime", wireType) - } - m.ModTime = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ModTime |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Linkname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStat - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStat - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Linkname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Devmajor", wireType) - } - m.Devmajor = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Devmajor |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Devminor", wireType) - } - m.Devminor = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Devminor |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Xattrs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStat - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStat - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Xattrs == nil { - m.Xattrs = make(map[string][]byte) - } - var mapkey string - mapvalue := []byte{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthStat - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthStat - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthStat - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex < 0 { - return ErrInvalidLengthStat - } - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipStat(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStat - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Xattrs[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStat(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStat - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStat - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipStat(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStat - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStat - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStat - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthStat - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupStat - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthStat - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthStat = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowStat = fmt.Errorf("proto: integer 
overflow") - ErrUnexpectedEndOfGroupStat = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/tonistiigi/fsutil/types/stat.proto b/vendor/github.com/tonistiigi/fsutil/types/stat.proto deleted file mode 100644 index 4138be6945..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/types/stat.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package fsutil.types; - -option go_package = "types"; - -message Stat { - string path = 1; - uint32 mode = 2; - uint32 uid = 3; - uint32 gid = 4; - int64 size = 5; - int64 modTime = 6; - // int32 typeflag = 7; - string linkname = 7; - int64 devmajor = 8; - int64 devminor = 9; - map xattrs = 10; -} \ No newline at end of file diff --git a/vendor/github.com/tonistiigi/fsutil/types/wire.pb.go b/vendor/github.com/tonistiigi/fsutil/types/wire.pb.go deleted file mode 100644 index 9e22269e95..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/types/wire.pb.go +++ /dev/null @@ -1,575 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: wire.proto - -package types - -import ( - bytes "bytes" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strconv "strconv" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Packet_PacketType int32 - -const ( - PACKET_STAT Packet_PacketType = 0 - PACKET_REQ Packet_PacketType = 1 - PACKET_DATA Packet_PacketType = 2 - PACKET_FIN Packet_PacketType = 3 - PACKET_ERR Packet_PacketType = 4 -) - -var Packet_PacketType_name = map[int32]string{ - 0: "PACKET_STAT", - 1: "PACKET_REQ", - 2: "PACKET_DATA", - 3: "PACKET_FIN", - 4: "PACKET_ERR", -} - -var Packet_PacketType_value = map[string]int32{ - "PACKET_STAT": 0, - "PACKET_REQ": 1, - "PACKET_DATA": 2, - "PACKET_FIN": 3, - "PACKET_ERR": 4, -} - -func (Packet_PacketType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_f2dcdddcdf68d8e0, []int{0, 0} -} - -type Packet struct { - Type Packet_PacketType `protobuf:"varint,1,opt,name=type,proto3,enum=fsutil.types.Packet_PacketType" json:"type,omitempty"` - Stat *Stat `protobuf:"bytes,2,opt,name=stat,proto3" json:"stat,omitempty"` - ID uint32 `protobuf:"varint,3,opt,name=ID,proto3" json:"ID,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *Packet) Reset() { *m = Packet{} } -func (*Packet) ProtoMessage() {} -func (*Packet) Descriptor() ([]byte, []int) { - return fileDescriptor_f2dcdddcdf68d8e0, []int{0} -} -func (m *Packet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Packet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Packet.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Packet) XXX_Merge(src proto.Message) { - xxx_messageInfo_Packet.Merge(m, src) -} -func (m *Packet) XXX_Size() int { - return m.Size() -} -func (m *Packet) XXX_DiscardUnknown() { - xxx_messageInfo_Packet.DiscardUnknown(m) -} - -var 
xxx_messageInfo_Packet proto.InternalMessageInfo - -func (m *Packet) GetType() Packet_PacketType { - if m != nil { - return m.Type - } - return PACKET_STAT -} - -func (m *Packet) GetStat() *Stat { - if m != nil { - return m.Stat - } - return nil -} - -func (m *Packet) GetID() uint32 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *Packet) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func init() { - proto.RegisterEnum("fsutil.types.Packet_PacketType", Packet_PacketType_name, Packet_PacketType_value) - proto.RegisterType((*Packet)(nil), "fsutil.types.Packet") -} - -func init() { proto.RegisterFile("wire.proto", fileDescriptor_f2dcdddcdf68d8e0) } - -var fileDescriptor_f2dcdddcdf68d8e0 = []byte{ - // 276 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xcf, 0x2c, 0x4a, - 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2b, 0x2e, 0x2d, 0xc9, 0xcc, 0xd1, 0x2b, - 0xa9, 0x2c, 0x48, 0x2d, 0x96, 0xe2, 0x2a, 0x2e, 0x49, 0x2c, 0x81, 0xc8, 0x28, 0xbd, 0x64, 0xe4, - 0x62, 0x0b, 0x48, 0x4c, 0xce, 0x4e, 0x2d, 0x11, 0x32, 0xe6, 0x62, 0x01, 0xc9, 0x4b, 0x30, 0x2a, - 0x30, 0x6a, 0xf0, 0x19, 0xc9, 0xeb, 0x21, 0xeb, 0xd1, 0x83, 0xa8, 0x81, 0x52, 0x21, 0x95, 0x05, - 0xa9, 0x41, 0x60, 0xc5, 0x42, 0x6a, 0x5c, 0x2c, 0x20, 0xd3, 0x24, 0x98, 0x14, 0x18, 0x35, 0xb8, - 0x8d, 0x84, 0x50, 0x35, 0x05, 0x97, 0x24, 0x96, 0x04, 0x81, 0xe5, 0x85, 0xf8, 0xb8, 0x98, 0x3c, - 0x5d, 0x24, 0x98, 0x15, 0x18, 0x35, 0x78, 0x83, 0x98, 0x3c, 0x5d, 0x84, 0x84, 0xb8, 0x58, 0x52, - 0x12, 0x4b, 0x12, 0x25, 0x58, 0x14, 0x18, 0x35, 0x78, 0x82, 0xc0, 0x6c, 0xa5, 0x38, 0x2e, 0x2e, - 0x84, 0xf9, 0x42, 0xfc, 0x5c, 0xdc, 0x01, 0x8e, 0xce, 0xde, 0xae, 0x21, 0xf1, 0xc1, 0x21, 0x8e, - 0x21, 0x02, 0x0c, 0x42, 0x7c, 0x5c, 0x5c, 0x50, 0x81, 0x20, 0xd7, 0x40, 0x01, 0x46, 0x24, 0x05, - 0x2e, 0x8e, 0x21, 0x8e, 0x02, 0x4c, 0x48, 0x0a, 0xdc, 0x3c, 0xfd, 0x04, 0x98, 0x91, 0xf8, 0xae, - 0x41, 0x41, 0x02, 0x2c, 0x4e, 0xd6, 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, - 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, - 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, - 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x58, 0xc1, - 0x7e, 0x49, 0x62, 0x03, 0x87, 0x97, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x9d, 0xe3, 0x51, - 0x57, 0x01, 0x00, 0x00, -} - -func (x Packet_PacketType) String() string { - s, ok := Packet_PacketType_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (this *Packet) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Packet) - if !ok { - that2, ok := that.(Packet) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Type != that1.Type { - return false - } - if !this.Stat.Equal(that1.Stat) { - return false - } - if this.ID != that1.ID { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - return true -} -func (this *Packet) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&types.Packet{") - s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") - if this.Stat != nil { - s = append(s, "Stat: "+fmt.Sprintf("%#v", this.Stat)+",\n") - } - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - s = append(s, "Data: 
"+fmt.Sprintf("%#v", this.Data)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringWire(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *Packet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Packet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Packet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintWire(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x22 - } - if m.ID != 0 { - i = encodeVarintWire(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x18 - } - if m.Stat != nil { - { - size, err := m.Stat.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintWire(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Type != 0 { - i = encodeVarintWire(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintWire(dAtA []byte, offset int, v uint64) int { - offset -= sovWire(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Packet) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != 0 { - n += 1 + sovWire(uint64(m.Type)) - } - if m.Stat != nil { - l = m.Stat.Size() - n += 1 + l + sovWire(uint64(l)) - } - if m.ID != 0 { - n += 1 + sovWire(uint64(m.ID)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovWire(uint64(l)) - } - return n -} - -func sovWire(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozWire(x uint64) (n int) { - return sovWire(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Packet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Packet{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Stat:` + strings.Replace(fmt.Sprintf("%v", this.Stat), "Stat", "Stat", 1) + `,`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func valueToStringWire(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Packet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Packet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Packet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - 
} - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= Packet_PacketType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stat", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWire - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthWire - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Stat == nil { - m.Stat = &Stat{} - } - if err := m.Stat.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthWire - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthWire - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipWire(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthWire - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthWire - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipWire(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWire - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWire - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWire - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthWire - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupWire - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthWire - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthWire = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowWire = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupWire = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/tonistiigi/fsutil/types/wire.proto b/vendor/github.com/tonistiigi/fsutil/types/wire.proto deleted file mode 100644 index 3e85000c58..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/types/wire.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package fsutil.types; - -option go_package = "types"; - -import "stat.proto"; - -message Packet { - enum PacketType { - PACKET_STAT = 0; - PACKET_REQ = 1; - PACKET_DATA = 2; - PACKET_FIN = 3; - PACKET_ERR = 4; - } - PacketType type = 1; - Stat stat = 2; - uint32 ID = 3; - bytes data = 4; -} diff --git a/vendor/github.com/tonistiigi/fsutil/validator.go b/vendor/github.com/tonistiigi/fsutil/validator.go deleted file mode 100644 index 9bd7d94d36..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/validator.go +++ /dev/null @@ -1,93 +0,0 @@ -package fsutil - -import ( - "os" - "path" - "runtime" - "sort" - "strings" - "syscall" - - "github.com/pkg/errors" -) - -type parent struct { - dir string - last string -} - -type Validator struct { - parentDirs []parent -} - -func (v *Validator) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { - if err != nil { - return err - } - // test that all paths are in order and all parent dirs were present - if v.parentDirs == nil { - v.parentDirs = make([]parent, 1, 10) - } - if runtime.GOOS == "windows" { - p = strings.Replace(p, "\\", "", -1) - } - if p != path.Clean(p) { - return errors.WithStack(&os.PathError{Path: p, Err: syscall.EINVAL, Op: "unclean path"}) - 
} - if path.IsAbs(p) { - return errors.WithStack(&os.PathError{Path: p, Err: syscall.EINVAL, Op: "absolute path"}) - } - dir := path.Dir(p) - base := path.Base(p) - if dir == "." { - dir = "" - } - if dir == ".." || strings.HasPrefix(p, "../") { - return errors.WithStack(&os.PathError{Path: p, Err: syscall.EINVAL, Op: "escape check"}) - } - - // find a parent dir from saved records - i := sort.Search(len(v.parentDirs), func(i int) bool { - return ComparePath(v.parentDirs[len(v.parentDirs)-1-i].dir, dir) <= 0 - }) - i = len(v.parentDirs) - 1 - i - if i != len(v.parentDirs)-1 { // skipping back to grandparent - v.parentDirs = v.parentDirs[:i+1] - } - - if dir != v.parentDirs[len(v.parentDirs)-1].dir || v.parentDirs[i].last >= base { - return errors.Errorf("changes out of order: %q %q", p, path.Join(v.parentDirs[i].dir, v.parentDirs[i].last)) - } - v.parentDirs[i].last = base - if kind != ChangeKindDelete && fi.IsDir() { - v.parentDirs = append(v.parentDirs, parent{ - dir: path.Join(dir, base), - last: "", - }) - } - // todo: validate invalid mode combinations - return err -} - -func ComparePath(p1, p2 string) int { - // byte-by-byte comparison to be compatible with str<>str - min := min(len(p1), len(p2)) - for i := 0; i < min; i++ { - switch { - case p1[i] == p2[i]: - continue - case p2[i] != '/' && p1[i] < p2[i] || p1[i] == '/': - return -1 - default: - return 1 - } - } - return len(p1) - len(p2) -} - -func min(x, y int) int { - if x < y { - return x - } - return y -} diff --git a/vendor/github.com/tonistiigi/fsutil/walker.go b/vendor/github.com/tonistiigi/fsutil/walker.go deleted file mode 100644 index f95101f319..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/walker.go +++ /dev/null @@ -1,357 +0,0 @@ -package fsutil - -import ( - "context" - "os" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/moby/patternmatcher" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -type WalkOpt struct { - IncludePatterns []string - ExcludePatterns []string - // FollowPaths contains symlinks that are resolved into include patterns - // before performing the fs walk - FollowPaths []string - Map MapFunc -} - -type MapFunc func(string, *types.Stat) MapResult - -// The result of the walk function controls -// both how WalkDir continues and whether the path is kept. -type MapResult int - -const ( - // Keep the current path and continue. - MapResultKeep MapResult = iota - - // Exclude the current path and continue. - MapResultExclude - - // Exclude the current path, and skip the rest of the dir. - // If path is a dir, skip the current directory. - // If path is a file, skip the rest of the parent directory. - // (This matches the semantics of fs.SkipDir.) 
- MapResultSkipDir -) - -func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) error { - root, err := filepath.EvalSymlinks(p) - if err != nil { - return errors.WithStack(&os.PathError{Op: "resolve", Path: root, Err: err}) - } - fi, err := os.Stat(root) - if err != nil { - return errors.WithStack(err) - } - if !fi.IsDir() { - return errors.WithStack(&os.PathError{Op: "walk", Path: root, Err: syscall.ENOTDIR}) - } - - var ( - includePatterns []string - includeMatcher *patternmatcher.PatternMatcher - excludeMatcher *patternmatcher.PatternMatcher - ) - - if opt != nil && opt.IncludePatterns != nil { - includePatterns = make([]string, len(opt.IncludePatterns)) - copy(includePatterns, opt.IncludePatterns) - } - if opt != nil && opt.FollowPaths != nil { - targets, err := FollowLinks(p, opt.FollowPaths) - if err != nil { - return err - } - if targets != nil { - includePatterns = append(includePatterns, targets...) - includePatterns = dedupePaths(includePatterns) - } - } - - patternChars := "*[]?^" - if os.PathSeparator != '\\' { - patternChars += `\` - } - - onlyPrefixIncludes := true - if len(includePatterns) != 0 { - includeMatcher, err = patternmatcher.New(includePatterns) - if err != nil { - return errors.Wrapf(err, "invalid includepatterns: %s", opt.IncludePatterns) - } - - for _, p := range includeMatcher.Patterns() { - if !p.Exclusion() && strings.ContainsAny(patternWithoutTrailingGlob(p), patternChars) { - onlyPrefixIncludes = false - break - } - } - - } - - onlyPrefixExcludeExceptions := true - if opt != nil && opt.ExcludePatterns != nil { - excludeMatcher, err = patternmatcher.New(opt.ExcludePatterns) - if err != nil { - return errors.Wrapf(err, "invalid excludepatterns: %s", opt.ExcludePatterns) - } - - for _, p := range excludeMatcher.Patterns() { - if p.Exclusion() && strings.ContainsAny(patternWithoutTrailingGlob(p), patternChars) { - onlyPrefixExcludeExceptions = false - break - } - } - } - - type visitedDir struct { - fi os.FileInfo - path string - origpath string - pathWithSep string - includeMatchInfo patternmatcher.MatchInfo - excludeMatchInfo patternmatcher.MatchInfo - calledFn bool - } - - // used only for include/exclude handling - var parentDirs []visitedDir - - seenFiles := make(map[uint64]string) - return filepath.Walk(root, func(path string, fi os.FileInfo, walkErr error) (retErr error) { - defer func() { - if retErr != nil && isNotExist(retErr) { - retErr = filepath.SkipDir - } - }() - - origpath := path - path, err = filepath.Rel(root, path) - if err != nil { - return err - } - // Skip root - if path == "." 
{ - return nil - } - - var ( - dir visitedDir - isDir bool - ) - if fi != nil { - isDir = fi.IsDir() - } - - if includeMatcher != nil || excludeMatcher != nil { - for len(parentDirs) != 0 { - lastParentDir := parentDirs[len(parentDirs)-1].pathWithSep - if strings.HasPrefix(path, lastParentDir) { - break - } - parentDirs = parentDirs[:len(parentDirs)-1] - } - - if isDir { - dir = visitedDir{ - fi: fi, - path: path, - origpath: origpath, - pathWithSep: path + string(filepath.Separator), - } - } - } - - skip := false - - if includeMatcher != nil { - var parentIncludeMatchInfo patternmatcher.MatchInfo - if len(parentDirs) != 0 { - parentIncludeMatchInfo = parentDirs[len(parentDirs)-1].includeMatchInfo - } - m, matchInfo, err := includeMatcher.MatchesUsingParentResults(path, parentIncludeMatchInfo) - if err != nil { - return errors.Wrap(err, "failed to match includepatterns") - } - - if isDir { - dir.includeMatchInfo = matchInfo - } - - if !m { - if isDir && onlyPrefixIncludes { - // Optimization: we can skip walking this dir if no include - // patterns could match anything inside it. - dirSlash := path + string(filepath.Separator) - for _, pat := range includeMatcher.Patterns() { - if pat.Exclusion() { - continue - } - patStr := patternWithoutTrailingGlob(pat) + string(filepath.Separator) - if strings.HasPrefix(patStr, dirSlash) { - goto passedIncludeFilter - } - } - return filepath.SkipDir - } - passedIncludeFilter: - skip = true - } - } - - if excludeMatcher != nil { - var parentExcludeMatchInfo patternmatcher.MatchInfo - if len(parentDirs) != 0 { - parentExcludeMatchInfo = parentDirs[len(parentDirs)-1].excludeMatchInfo - } - m, matchInfo, err := excludeMatcher.MatchesUsingParentResults(path, parentExcludeMatchInfo) - if err != nil { - return errors.Wrap(err, "failed to match excludepatterns") - } - - if isDir { - dir.excludeMatchInfo = matchInfo - } - - if m { - if isDir && onlyPrefixExcludeExceptions { - // Optimization: we can skip walking this dir if no - // exceptions to exclude patterns could match anything - // inside it. 
- if !excludeMatcher.Exclusions() { - return filepath.SkipDir - } - - dirSlash := path + string(filepath.Separator) - for _, pat := range excludeMatcher.Patterns() { - if !pat.Exclusion() { - continue - } - patStr := patternWithoutTrailingGlob(pat) + string(filepath.Separator) - if strings.HasPrefix(patStr, dirSlash) { - goto passedExcludeFilter - } - } - return filepath.SkipDir - } - passedExcludeFilter: - skip = true - } - } - - if walkErr != nil { - if skip && errors.Is(walkErr, os.ErrPermission) { - return nil - } - return walkErr - } - - if includeMatcher != nil || excludeMatcher != nil { - defer func() { - if isDir { - parentDirs = append(parentDirs, dir) - } - }() - } - - if skip { - return nil - } - - dir.calledFn = true - - stat, err := mkstat(origpath, path, fi, seenFiles) - if err != nil { - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - default: - if opt != nil && opt.Map != nil { - result := opt.Map(stat.Path, stat) - if result == MapResultSkipDir { - return filepath.SkipDir - } else if result == MapResultExclude { - return nil - } - } - for i, parentDir := range parentDirs { - if parentDir.calledFn { - continue - } - parentStat, err := mkstat(parentDir.origpath, parentDir.path, parentDir.fi, seenFiles) - if err != nil { - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - if opt != nil && opt.Map != nil { - result := opt.Map(parentStat.Path, parentStat) - if result == MapResultSkipDir || result == MapResultExclude { - continue - } - } - - if err := fn(parentStat.Path, &StatInfo{parentStat}, nil); err != nil { - return err - } - parentDirs[i].calledFn = true - } - if err := fn(stat.Path, &StatInfo{stat}, nil); err != nil { - return err - } - } - return nil - }) -} - -func patternWithoutTrailingGlob(p *patternmatcher.Pattern) string { - patStr := p.String() - // We use filepath.Separator here because patternmatcher.Pattern patterns - // get transformed to use the native path separator: - // https://github.com/moby/patternmatcher/blob/130b41bafc16209dc1b52a103fdac1decad04f1a/patternmatcher.go#L52 - patStr = strings.TrimSuffix(patStr, string(filepath.Separator)+"**") - patStr = strings.TrimSuffix(patStr, string(filepath.Separator)+"*") - return patStr -} - -type StatInfo struct { - *types.Stat -} - -func (s *StatInfo) Name() string { - return filepath.Base(s.Stat.Path) -} -func (s *StatInfo) Size() int64 { - return s.Stat.Size_ -} -func (s *StatInfo) Mode() os.FileMode { - return os.FileMode(s.Stat.Mode) -} -func (s *StatInfo) ModTime() time.Time { - return time.Unix(s.Stat.ModTime/1e9, s.Stat.ModTime%1e9) -} -func (s *StatInfo) IsDir() bool { - return s.Mode().IsDir() -} -func (s *StatInfo) Sys() interface{} { - return s.Stat -} - -func isNotExist(err error) bool { - return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 505e7b7b59..67d10ea7d3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -473,9 +473,6 @@ github.com/containerd/stargz-snapshotter/estargz/errorutil # github.com/containerd/ttrpc v1.2.2 ## explicit; go 1.13 github.com/containerd/ttrpc -# github.com/containerd/typeurl v1.0.2 -## explicit; go 1.13 -github.com/containerd/typeurl # github.com/containerd/typeurl/v2 v2.1.1 ## explicit; go 1.13 github.com/containerd/typeurl/v2 @@ -488,6 +485,9 @@ github.com/cyphar/filepath-securejoin # github.com/dimchansky/utfbom v1.1.1 ## explicit github.com/dimchansky/utfbom +# github.com/distribution/reference v0.5.0 +## explicit; go 
1.20 +github.com/distribution/reference # github.com/docker/cli v24.0.7+incompatible ## explicit github.com/docker/cli/cli/config @@ -500,12 +500,13 @@ github.com/docker/distribution github.com/docker/distribution/digestset github.com/docker/distribution/reference github.com/docker/distribution/registry/client/auth/challenge -# github.com/docker/docker v24.0.7+incompatible +# github.com/docker/docker v25.0.3+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types github.com/docker/docker/api/types/backend github.com/docker/docker/api/types/blkiodev +github.com/docker/docker/api/types/checkpoint github.com/docker/docker/api/types/container github.com/docker/docker/api/types/events github.com/docker/docker/api/types/filters @@ -517,6 +518,7 @@ github.com/docker/docker/api/types/registry github.com/docker/docker/api/types/strslice github.com/docker/docker/api/types/swarm github.com/docker/docker/api/types/swarm/runtime +github.com/docker/docker/api/types/system github.com/docker/docker/api/types/time github.com/docker/docker/api/types/versions github.com/docker/docker/api/types/volume @@ -541,7 +543,9 @@ github.com/docker/docker/daemon/network github.com/docker/docker/dockerversion github.com/docker/docker/errdefs github.com/docker/docker/image +github.com/docker/docker/image/spec/specs-go/v1 github.com/docker/docker/internal/mounttree +github.com/docker/docker/internal/multierror github.com/docker/docker/internal/unshare github.com/docker/docker/layer github.com/docker/docker/libcontainerd/types @@ -556,14 +560,12 @@ github.com/docker/docker/pkg/idtools github.com/docker/docker/pkg/ioutils github.com/docker/docker/pkg/jsonmessage github.com/docker/docker/pkg/longpath -github.com/docker/docker/pkg/meminfo github.com/docker/docker/pkg/parsers github.com/docker/docker/pkg/parsers/kernel github.com/docker/docker/pkg/plugingetter github.com/docker/docker/pkg/plugins github.com/docker/docker/pkg/plugins/transport github.com/docker/docker/pkg/pools -github.com/docker/docker/pkg/process github.com/docker/docker/pkg/progress github.com/docker/docker/pkg/reexec github.com/docker/docker/pkg/rootless @@ -833,7 +835,7 @@ github.com/karrick/godirwalk # github.com/kevinburke/ssh_config v1.2.0 ## explicit github.com/kevinburke/ssh_config -# github.com/klauspost/compress v1.17.0 +# github.com/klauspost/compress v1.17.2 ## explicit; go 1.18 github.com/klauspost/compress github.com/klauspost/compress/fse @@ -857,8 +859,8 @@ github.com/mitchellh/go-homedir # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure -# github.com/moby/buildkit v0.11.6 -## explicit; go 1.18 +# github.com/moby/buildkit v0.12.5 +## explicit; go 1.20 github.com/moby/buildkit/frontend/dockerfile/command github.com/moby/buildkit/frontend/dockerfile/dockerignore github.com/moby/buildkit/frontend/dockerfile/instructions @@ -919,7 +921,6 @@ github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 # github.com/opencontainers/runc v1.1.12 ## explicit; go 1.17 -github.com/opencontainers/runc/libcontainer/user # github.com/opencontainers/runtime-spec v1.1.0 ## explicit github.com/opencontainers/runtime-spec/specs-go @@ -954,21 +955,18 @@ github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.3.0 ## explicit; go 1.9 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.37.0 -## explicit; go 1.16 +# github.com/prometheus/common v0.42.0 +## explicit; go 1.18 
github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.8.0 -## explicit; go 1.17 +# github.com/prometheus/procfs v0.9.0 +## explicit; go 1.18 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util # github.com/rootless-containers/rootlesskit v1.1.0 ## explicit; go 1.19 -github.com/rootless-containers/rootlesskit/pkg/api -github.com/rootless-containers/rootlesskit/pkg/api/client -github.com/rootless-containers/rootlesskit/pkg/port # github.com/sagikazarmark/locafero v0.4.0 ## explicit; go 1.20 github.com/sagikazarmark/locafero @@ -1019,10 +1017,8 @@ github.com/spf13/viper/internal/features # github.com/subosito/gotenv v1.6.0 ## explicit; go 1.18 github.com/subosito/gotenv -# github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa -## explicit; go 1.18 -github.com/tonistiigi/fsutil -github.com/tonistiigi/fsutil/types +# github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb +## explicit; go 1.19 # github.com/toqueteos/webbrowser v1.2.0 ## explicit; go 1.12 github.com/toqueteos/webbrowser
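Note on the fsutil removal above: the walker.go and validator.go deletions drop the last vendored copies of github.com/tonistiigi/fsutil's old Walk API, and the modules.txt hunk keeps the module header (now at v0.0.0-20230629203738-36ef4d8c0dbb, go 1.19) with no package lines under it, which is consistent with what `go mod vendor` emits when a module is still required but none of its packages are imported directly any longer. For reference, below is a minimal caller-side sketch of the interface being removed. It uses only identifiers visible in the deleted walker.go (fsutil.Walk, fsutil.WalkOpt, the MapResult constants, types.Stat); the sample program itself, its patterns, and the dotfile filter are illustrative assumptions rather than code from this repository, and newer fsutil releases may expose a different walking API.

// Illustrative only: a hypothetical caller of the pre-bump fsutil Walk API
// whose vendored sources are deleted in this patch. Identifiers such as
// fsutil.Walk, fsutil.WalkOpt, fsutil.MapResult* and types.Stat are taken
// from the deleted walker.go above; everything else here is assumed.
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/tonistiigi/fsutil"
	"github.com/tonistiigi/fsutil/types"
)

func main() {
	opt := &fsutil.WalkOpt{
		IncludePatterns: []string{"**"},        // docker-style patterns, handled by moby/patternmatcher
		ExcludePatterns: []string{"vendor/**"}, // prune the vendor tree
		// Map runs after the include/exclude filters; it can veto single
		// entries (MapResultExclude) or prune directories (MapResultSkipDir).
		Map: func(p string, st *types.Stat) fsutil.MapResult {
			if strings.HasPrefix(filepath.Base(p), ".") {
				return fsutil.MapResultExclude // hypothetical policy: drop dotfiles
			}
			return fsutil.MapResultKeep
		},
	}
	err := fsutil.Walk(context.TODO(), ".", opt, func(p string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Printf("%s\t%d bytes\n", p, fi.Size())
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "walk failed:", err)
		os.Exit(1)
	}
}

Per the comments in the deleted source, MapResultExclude drops only the current entry while MapResultSkipDir skips the remainder of the directory, mirroring fs.SkipDir semantics.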