From 36da0342f9f104567b0d6eb8a7fe80732f6cf179 Mon Sep 17 00:00:00 2001
From: Akihiro Suda
Date: Fri, 15 Mar 2019 14:37:14 +0900
Subject: [PATCH 1/4] vendor BuildKit v0.4.0

Signed-off-by: Akihiro Suda
---
 Dockerfile | 15 +-
 Makefile | 4 +-
 client/pull.go | 22 +-
 client/push.go | 6 +-
 client/workeropt.go | 5 -
 go.mod | 48 +-
 go.sum | 108 +-
 login.go | 17 +-
 .../Microsoft/hcsshim/.gometalinter.json | 17 +
 .../github.com/Microsoft/hcsshim/appveyor.yml | 14 +-
 .../Microsoft/hcsshim/hnsendpoint.go | 3 +
 .../github.com/Microsoft/hcsshim/interface.go | 5 +
 .../hcsshim/internal/guestrequest/types.go | 100 +
 .../hcsshim/internal/hcs/callback.go | 49 +-
 .../Microsoft/hcsshim/internal/hcs/errors.go | 12 +-
 .../Microsoft/hcsshim/internal/hcs/hcs.go | 1 +
 .../Microsoft/hcsshim/internal/hcs/log.go | 15 +
 .../Microsoft/hcsshim/internal/hcs/process.go | 181 +-
 .../Microsoft/hcsshim/internal/hcs/system.go | 393 +-
 .../Microsoft/hcsshim/internal/hcs/watcher.go | 31 +-
 .../hcsshim/internal/hcs/zsyscall_windows.go | 154 +-
 .../hcsshim/internal/hcserror/hcserror.go | 4 -
 .../hcsshim/internal/hns/hnsendpoint.go | 2 +
 .../hcsshim/internal/hns/hnspolicylist.go | 1 +
 .../hcsshim/internal/hns/zsyscall_windows.go | 8 +-
 .../hcsshim/internal/interop/interop.go | 4 +-
 .../internal/interop/zsyscall_windows.go | 6 +-
 .../hcsshim/internal/logfields/fields.go | 37 +
 .../hcsshim/internal/schema1/schema1.go | 23 +-
 .../hcsshim/internal/schema2/attachment.go | 31 +
 .../hcsshim/internal/schema2/battery.go | 13 +
 .../schema2/cache_query_stats_response.go | 19 +
 .../hcsshim/internal/schema2/chipset.go | 27 +
 .../hcsshim/internal/schema2/close_handle.go | 15 +
 .../hcsshim/internal/schema2/com_port.go | 18 +
 .../internal/schema2/compute_system.go | 27 +
 .../hcsshim/internal/schema2/configuration.go | 72 +
 .../hcsshim/internal/schema2/console_size.go | 17 +
 .../hcsshim/internal/schema2/container.go | 35 +
 .../container_credential_guard_state.go | 25 +
 .../schema2/container_memory_information.go | 26 +
 .../hcsshim/internal/schema2/device.go | 16 +
 .../hcsshim/internal/schema2/devices.go | 43 +
 .../internal/schema2/enhanced_mode_video.go | 15 +
 .../internal/schema2/flexible_io_device.go | 19 +
 .../internal/schema2/guest_connection.go | 19 +
 .../internal/schema2/guest_connection_info.go | 21 +
 .../internal/schema2/guest_crash_reporting.go | 15 +
 .../hcsshim/internal/schema2/guest_os.go | 15 +
 .../hcsshim/internal/schema2/guest_state.go | 22 +
 .../hcsshim/internal/schema2/hosted_system.go | 17 +
 .../hcsshim/internal/schema2/hv_socket.go | 17 +
 .../hcsshim/internal/schema2/hv_socket_2.go | 16 +
 .../schema2/hv_socket_service_config.go | 22 +
 .../schema2/hv_socket_system_config.go | 22 +
 .../hcsshim/internal/schema2/keyboard.go | 13 +
 .../hcsshim/internal/schema2/layer.go | 22 +
 .../internal/schema2/linux_kernel_direct.go | 18 +
 .../internal/schema2/mapped_directory.go | 21 +
 .../hcsshim/internal/schema2/mapped_pipe.go | 19 +
 .../hcsshim/internal/schema2/memory.go | 15 +
 .../hcsshim/internal/schema2/memory_2.go | 25 +
 .../schema2/memory_information_for_vm.go | 19 +
 .../hcsshim/internal/schema2/memory_stats.go | 20 +
 .../schema2/modify_setting_request.go | 20 +
 .../hcsshim/internal/schema2/mouse.go | 13 +
 .../internal/schema2/network_adapter.go | 17 +
 .../hcsshim/internal/schema2/networking.go | 24 +
 .../internal/schema2/pause_notification.go | 16 +
 .../hcsshim/internal/schema2/pause_options.go | 18 +
 .../hcsshim/internal/schema2/plan9.go | 15 +
 .../hcsshim/internal/schema2/plan9_share.go | 33 +
 .../internal/schema2/process_details.go | 34
+ .../schema2/process_modify_request.go | 20 + .../internal/schema2/process_parameters.go | 47 + .../internal/schema2/process_status.go | 22 + .../hcsshim/internal/schema2/processor.go | 19 + .../hcsshim/internal/schema2/processor_2.go | 21 + .../internal/schema2/processor_stats.go | 20 + .../hcsshim/internal/schema2/properties.go | 47 + .../internal/schema2/property_query.go | 16 + .../schema2/rdp_connection_options.go | 17 + .../internal/schema2/registry_changes.go | 17 + .../hcsshim/internal/schema2/registry_key.go | 19 + .../internal/schema2/registry_value.go | 31 + .../hcsshim/internal/schema2/restore_state.go | 19 + .../hcsshim/internal/schema2/save_options.go | 19 + .../hcsshim/internal/schema2/scsi.go | 16 + .../schema2/shared_memory_configuration.go | 15 + .../internal/schema2/shared_memory_region.go | 23 + .../schema2/shared_memory_region_info.go | 17 + .../internal/schema2/silo_properties.go | 18 + .../hcsshim/internal/schema2/statistics.go | 30 + .../hcsshim/internal/schema2/storage.go | 21 + .../hcsshim/internal/schema2/storage_qo_s.go | 17 + .../hcsshim/internal/schema2/storage_stats.go | 22 + .../hcsshim/internal/schema2/topology.go | 17 + .../hcsshim/internal/schema2/uefi.go | 21 + .../internal/schema2/uefi_boot_entry.go | 23 + .../hcsshim/internal/schema2/version.go | 17 + .../hcsshim/internal/schema2/video_monitor.go | 19 + .../internal/schema2/virtual_machine.go | 32 + .../internal/schema2/virtual_node_info.go | 21 + .../schema2/virtual_p_mem_controller.go | 20 + .../internal/schema2/virtual_p_mem_device.go | 19 + .../hcsshim/internal/schema2/virtual_smb.go | 17 + .../internal/schema2/virtual_smb_share.go | 21 + .../schema2/virtual_smb_share_options.go | 63 + .../hcsshim/internal/schema2/vm_memory.go | 27 + .../schema2/windows_crash_reporting.go | 17 + .../hcsshim/internal/wclayer/activatelayer.go | 25 +- .../hcsshim/internal/wclayer/createlayer.go | 26 +- .../internal/wclayer/createscratchlayer.go | 23 +- .../internal/wclayer/deactivatelayer.go | 25 +- .../hcsshim/internal/wclayer/destroylayer.go | 25 +- .../internal/wclayer/expandscratchsize.go | 26 +- .../hcsshim/internal/wclayer/exportlayer.go | 113 +- .../internal/wclayer/getlayermountpath.go | 33 +- .../internal/wclayer/getsharedbaseimages.go | 19 +- .../hcsshim/internal/wclayer/grantvmaccess.go | 28 +- .../hcsshim/internal/wclayer/importlayer.go | 119 +- .../hcsshim/internal/wclayer/layerexists.go | 26 +- .../hcsshim/internal/wclayer/layerutils.go | 4 +- .../hcsshim/internal/wclayer/nametoguid.go | 20 +- .../hcsshim/internal/wclayer/preparelayer.go | 23 +- .../internal/wclayer/unpreparelayer.go | 25 +- .../hcsshim/internal/wclayer/wclayer.go | 12 +- .../internal/wclayer/zsyscall_windows.go | 233 +- vendor/github.com/Microsoft/hcsshim/layer.go | 6 +- .../Microsoft/hcsshim/mksyscall_windows.go | 13 +- .../github.com/Microsoft/hcsshim/vendor.conf | 21 + .../github.com/Microsoft/hcsshim/version.go | 6 - .../Microsoft/hcsshim/zsyscall_windows.go | 8 +- vendor/github.com/Nvveen/Gotty/LICENSE | 26 - vendor/github.com/Nvveen/Gotty/README | 5 - vendor/github.com/Nvveen/Gotty/TODO | 3 - vendor/github.com/Nvveen/Gotty/attributes.go | 514 -- vendor/github.com/Nvveen/Gotty/gotty.go | 238 - vendor/github.com/Nvveen/Gotty/parser.go | 362 -- vendor/github.com/Nvveen/Gotty/types.go | 23 - .../checkpoint-restore/go-criu/LICENSE | 201 + .../checkpoint-restore/go-criu/rpc/rpc.pb.go | 1211 +++++ .../containerd/api/events/container.pb.go | 1238 +++++ .../containerd/api/events/container.proto | 31 + 
.../containerd/api/events/content.pb.go | 329 ++ .../containerd/api/events/content.proto | 13 + .../containerd/containerd/api/events/doc.go | 19 + .../containerd/api/events/image.pb.go | 949 ++++ .../containerd/api/events/image.proto | 22 + .../containerd/api/events/namespace.pb.go | 950 ++++ .../containerd/api/events/namespace.proto | 23 + .../containerd/api/events/snapshot.pb.go | 704 +++ .../containerd/api/events/snapshot.proto | 22 + .../containerd/api/events/task.pb.go | 2610 ++++++++++ .../containerd/api/events/task.proto | 75 + .../api/services/content/v1/content.pb.go | 4447 +++++++++++++++++ .../api/services/content/v1/content.proto | 318 ++ .../containerd/api/types/descriptor.pb.go | 410 ++ .../containerd/api/types/descriptor.proto | 18 + .../containerd/containerd/api/types/doc.go | 17 + .../containerd/api/types/metrics.pb.go | 412 ++ .../containerd/api/types/metrics.proto | 15 + .../containerd/api/types/mount.pb.go | 456 ++ .../containerd/api/types/mount.proto | 29 + .../containerd/api/types/platform.pb.go | 394 ++ .../containerd/api/types/platform.proto | 15 + .../archive/compression/compression.go | 57 +- .../containerd/containerd/archive/tar.go | 4 +- .../containerd/archive/tar_windows.go | 8 +- .../containerd/archive/time_unix.go | 2 +- .../containerd/containerd/content/content.go | 7 +- .../containerd/content/local/locks.go | 4 +- .../containerd/content/local/store.go | 5 +- .../containerd/content/local/writer.go | 71 +- .../content/proxy/content_reader.go | 65 + .../containerd/content/proxy/content_store.go | 234 + .../content/proxy/content_writer.go | 139 + .../containerd/contrib/seccomp/seccomp.go | 4 +- .../containerd/containerd/diff/apply/apply.go | 2 +- .../containerd/diff/walking/differ.go | 11 +- .../containerd/containerd/errdefs/grpc.go | 2 +- .../containerd/events/exchange/exchange.go | 8 +- .../containerd/containerd/filters/parser.go | 2 +- .../containerd/containerd/filters/scanner.go | 1 - .../containerd/identifiers/validate.go | 2 +- .../containerd/containerd/images/handlers.go | 16 +- .../containerd/containerd/images/image.go | 9 +- .../containerd/images/mediatypes.go | 13 +- .../containerd/images/oci/exporter.go | 40 +- .../containerd/containerd/metadata/buckets.go | 90 +- .../containerd/metadata/containers.go | 2 +- .../containerd/containerd/metadata/content.go | 80 +- .../containerd/containerd/metadata/db.go | 34 +- .../containerd/containerd/metadata/gc.go | 4 - .../containerd/containerd/metadata/images.go | 2 +- .../containerd/containerd/metadata/leases.go | 2 +- .../containerd/metadata/snapshot.go | 2 +- .../containerd/containerd/mount/mount_unix.go | 2 +- ...{mountinfo_freebsd.go => mountinfo_bsd.go} | 2 + .../containerd/mount/mountinfo_linux.go | 8 +- .../containerd/mount/mountinfo_unsupported.go | 2 +- .../containerd/containerd/oci/spec.go | 13 +- .../containerd/containerd/oci/spec_opts.go | 61 +- .../containerd/oci/spec_opts_windows.go | 67 + .../containerd/platforms/cpuinfo.go | 16 + .../containerd/containerd/plugin/plugin.go | 5 +- .../containerd/remotes/docker/auth.go | 4 +- .../containerd/remotes/docker/authorizer.go | 317 ++ .../containerd/remotes/docker/converter.go | 88 + .../containerd/remotes/docker/resolver.go | 367 +- .../remotes/docker/schema1/converter.go | 22 +- .../containerd/containerd/remotes/handlers.go | 2 +- .../containerd/services/content/service.go | 492 ++ .../containerd/services/content/store.go | 71 + .../containerd/services/services.go | 36 + .../containerd/snapshots/native/native.go | 3 +- 
.../containerd/snapshots/overlay/overlay.go | 4 +- .../containerd/snapshots/snapshotter.go | 20 +- .../time_darwin.go => version/version.go} | 21 +- .../containerd/continuity/.travis.yml | 11 + vendor/github.com/coreos/go-systemd/LICENSE | 191 + vendor/github.com/coreos/go-systemd/NOTICE | 5 + .../coreos/go-systemd/activation/files.go | 67 + .../coreos/go-systemd/activation/listeners.go | 103 + .../go-systemd/activation/packetconns.go | 38 + .../github.com/coreos/go-systemd/dbus/dbus.go | 240 + .../coreos/go-systemd/dbus/methods.go | 594 +++ .../coreos/go-systemd/dbus/properties.go | 237 + .../github.com/coreos/go-systemd/dbus/set.go | 47 + .../coreos/go-systemd/dbus/subscription.go | 333 ++ .../go-systemd/dbus/subscription_set.go | 57 + .../github.com/coreos/go-systemd/util/util.go | 90 + .../coreos/go-systemd/util/util_cgo.go | 175 + .../coreos/go-systemd/util/util_stub.go | 23 + vendor/github.com/coreos/pkg/LICENSE | 202 + vendor/github.com/coreos/pkg/NOTICE | 5 + vendor/github.com/coreos/pkg/dlopen/dlopen.go | 82 + .../coreos/pkg/dlopen/dlopen_example.go | 56 + vendor/github.com/docker/cli/AUTHORS | 46 +- .../docker/cli/cli/config/config.go | 13 +- .../docker/cli/cli/config/configfile/file.go | 12 +- .../cli/cli/config/credentials/credentials.go | 2 +- .../cli/cli/config/credentials/file_store.go | 23 +- .../cli/config/credentials/native_store.go | 2 +- .../docker/cli/cli/config/types/authconfig.go | 22 + vendor/github.com/docker/cli/opts/config.go | 98 - vendor/github.com/docker/cli/opts/duration.go | 64 - vendor/github.com/docker/cli/opts/env.go | 46 - vendor/github.com/docker/cli/opts/envfile.go | 22 - vendor/github.com/docker/cli/opts/file.go | 77 - vendor/github.com/docker/cli/opts/hosts.go | 165 - .../github.com/docker/cli/opts/hosts_unix.go | 8 - .../docker/cli/opts/hosts_windows.go | 6 - vendor/github.com/docker/cli/opts/ip.go | 47 - vendor/github.com/docker/cli/opts/mount.go | 174 - vendor/github.com/docker/cli/opts/network.go | 106 - vendor/github.com/docker/cli/opts/opts.go | 509 -- .../github.com/docker/cli/opts/opts_unix.go | 6 - .../docker/cli/opts/opts_windows.go | 56 - vendor/github.com/docker/cli/opts/parse.go | 99 - vendor/github.com/docker/cli/opts/port.go | 172 - .../docker/cli/opts/quotedstring.go | 37 - vendor/github.com/docker/cli/opts/runtime.go | 79 - vendor/github.com/docker/cli/opts/secret.go | 98 - .../docker/cli/opts/throttledevice.go | 108 - vendor/github.com/docker/cli/opts/ulimit.go | 57 - .../docker/cli/opts/weightdevice.go | 84 - .../github.com/docker/distribution/.mailmap | 24 +- vendor/github.com/docker/distribution/AUTHORS | 182 - .../docker/distribution/CHANGELOG.md | 108 - .../github.com/docker/distribution/Dockerfile | 12 +- .../github.com/docker/distribution/Makefile | 3 - .../docker/distribution/RELEASE-CHECKLIST.md | 44 - .../manifest/manifestlist/manifestlist.go | 4 +- .../distribution/manifest/schema1/manifest.go | 4 +- .../manifest/schema1/reference_builder.go | 2 +- .../distribution/manifest/schema2/manifest.go | 2 +- .../docker/distribution/manifests.go | 2 +- .../distribution/reference/normalize.go | 29 + .../distribution/reference/reference.go | 2 +- .../registry/api/errcode/errors.go | 6 +- .../distribution/registry/api/v2/urls.go | 12 - .../client/auth/challenge/authchallenge.go | 4 +- .../docker/distribution/vendor.conf | 3 +- .../docker/api/types/container/host_config.go | 10 +- .../docker/docker/api/types/filters/parse.go | 16 + .../docker/docker/api/types/mount/mount.go | 3 +- .../docker/api/types/network/network.go | 13 +- 
.../docker/docker/api/types/seccomp.go | 5 +- .../docker/docker/api/types/swarm/config.go | 7 +- .../docker/api/types/swarm/container.go | 2 + .../docker/docker/api/types/swarm/swarm.go | 2 + .../docker/docker/api/types/swarm/task.go | 1 + .../docker/docker/api/types/types.go | 2 + .../docker/docker/errdefs/helpers.go | 55 +- .../docker/docker/pkg/archive/archive.go | 8 +- .../docker/pkg/archive/archive_linux.go | 179 +- .../docker/pkg/archive/archive_other.go | 2 +- .../docker/docker/pkg/archive/changes.go | 12 +- .../docker/docker/pkg/archive/changes_unix.go | 8 +- .../docker/pkg/archive/changes_windows.go | 8 +- .../docker/docker/pkg/archive/copy.go | 8 + .../docker/docker/pkg/archive/diff.go | 10 +- .../docker/docker/pkg/fileutils/fileutils.go | 2 +- .../docker/pkg/homedir/homedir_linux.go | 88 + .../docker/pkg/homedir/homedir_others.go | 20 + .../docker/pkg/jsonmessage/jsonmessage.go | 104 +- .../docker/docker/pkg/mount/flags.go | 12 - .../docker/docker/pkg/mount/mount.go | 62 +- .../docker/pkg/mount/mounter_freebsd.go | 17 +- .../docker/docker/pkg/mount/mounter_linux.go | 30 +- .../docker/pkg/mount/mounter_unsupported.go | 4 - .../docker/pkg/mount/sharedsubtree_linux.go | 20 +- .../docker/docker/pkg/mount/unmount_unix.go | 22 + .../docker/pkg/mount/unmount_unsupported.go | 7 + .../docker/docker/pkg/signal/signal_linux.go | 2 + .../docker/pkg/signal/signal_linux_mipsx.go | 84 + .../docker/docker/pkg/system/lstat_unix.go | 3 +- .../docker/docker/pkg/system/stat_unix.go | 3 +- .../docker/docker/registry/config.go | 6 - .../docker/docker/registry/registry.go | 2 +- .../docker/docker/registry/service.go | 17 +- .../libnetwork/resolvconf/resolvconf.go | 15 +- .../docker/libnetwork/types/types.go | 39 +- vendor/github.com/godbus/dbus/.travis.yml | 40 + vendor/github.com/godbus/dbus/CONTRIBUTING.md | 50 + vendor/github.com/godbus/dbus/LICENSE | 25 + vendor/github.com/godbus/dbus/MAINTAINERS | 3 + vendor/github.com/godbus/dbus/README.markdown | 44 + vendor/github.com/godbus/dbus/auth.go | 253 + .../github.com/godbus/dbus/auth_external.go | 26 + vendor/github.com/godbus/dbus/auth_sha1.go | 102 + vendor/github.com/godbus/dbus/call.go | 36 + vendor/github.com/godbus/dbus/conn.go | 683 +++ vendor/github.com/godbus/dbus/conn_darwin.go | 33 + vendor/github.com/godbus/dbus/conn_other.go | 42 + vendor/github.com/godbus/dbus/dbus.go | 427 ++ vendor/github.com/godbus/dbus/decoder.go | 228 + .../github.com/godbus/dbus/default_handler.go | 291 ++ vendor/github.com/godbus/dbus/doc.go | 69 + vendor/github.com/godbus/dbus/encoder.go | 210 + vendor/github.com/godbus/dbus/export.go | 413 ++ vendor/github.com/godbus/dbus/homedir.go | 28 + .../github.com/godbus/dbus/homedir_dynamic.go | 15 + .../github.com/godbus/dbus/homedir_static.go | 45 + vendor/github.com/godbus/dbus/message.go | 353 ++ vendor/github.com/godbus/dbus/object.go | 147 + .../godbus/dbus/server_interfaces.go | 89 + vendor/github.com/godbus/dbus/sig.go | 259 + .../godbus/dbus/transport_darwin.go | 6 + .../godbus/dbus/transport_generic.go | 50 + .../github.com/godbus/dbus/transport_tcp.go | 43 + .../github.com/godbus/dbus/transport_unix.go | 196 + .../dbus/transport_unixcred_dragonfly.go | 95 + .../godbus/dbus/transport_unixcred_freebsd.go | 91 + .../godbus/dbus/transport_unixcred_linux.go | 25 + .../godbus/dbus/transport_unixcred_openbsd.go | 14 + vendor/github.com/godbus/dbus/variant.go | 144 + .../github.com/godbus/dbus/variant_lexer.go | 284 ++ .../github.com/godbus/dbus/variant_parser.go | 817 +++ vendor/github.com/gofrs/flock/.gitignore | 
24 + vendor/github.com/gofrs/flock/.travis.yml | 10 + vendor/github.com/gofrs/flock/LICENSE | 27 + vendor/github.com/gofrs/flock/README.md | 40 + vendor/github.com/gofrs/flock/appveyor.yml | 25 + vendor/github.com/gofrs/flock/flock.go | 127 + vendor/github.com/gofrs/flock/flock_unix.go | 195 + vendor/github.com/gofrs/flock/flock_winapi.go | 76 + .../github.com/gofrs/flock/flock_windows.go | 140 + vendor/github.com/gogo/protobuf/LICENSE | 5 +- .../github.com/gogo/protobuf/gogoproto/doc.go | 2 +- .../gogo/protobuf/gogoproto/gogo.pb.go | 215 +- .../gogo/protobuf/gogoproto/gogo.proto | 8 + .../gogo/protobuf/gogoproto/helper.go | 57 + .../github.com/gogo/protobuf/proto/encode.go | 18 - vendor/github.com/gogo/protobuf/proto/lib.go | 80 +- .../protobuf/proto/pointer_unsafe_gogo.go | 2 +- .../gogo/protobuf/proto/properties.go | 30 +- .../gogo/protobuf/proto/table_marshal.go | 313 +- .../gogo/protobuf/proto/table_unmarshal.go | 255 +- vendor/github.com/gogo/protobuf/proto/text.go | 4 +- .../gogo/protobuf/proto/text_parser.go | 26 +- .../gogo/protobuf/proto/wrappers.go | 1888 +++++++ .../gogo/protobuf/proto/wrappers_gogo.go | 113 + .../github.com/gogo/protobuf/types/any.pb.go | 29 +- .../github.com/gogo/protobuf/types/api.pb.go | 93 +- .../gogo/protobuf/types/duration.pb.go | 29 +- .../gogo/protobuf/types/empty.pb.go | 28 +- .../gogo/protobuf/types/field_mask.pb.go | 31 +- .../gogo/protobuf/types/protosize.go | 34 + .../gogo/protobuf/types/source_context.pb.go | 29 +- .../gogo/protobuf/types/struct.pb.go | 113 +- .../gogo/protobuf/types/timestamp.pb.go | 29 +- .../github.com/gogo/protobuf/types/type.pb.go | 219 +- .../gogo/protobuf/types/wrappers.pb.go | 70 +- .../gogo/protobuf/types/wrappers_gogo.go | 300 ++ .../github.com/ishidawataru/sctp/.gitignore | 14 + .../github.com/ishidawataru/sctp/.travis.yml | 17 + vendor/github.com/ishidawataru/sctp/LICENSE | 201 + vendor/github.com/ishidawataru/sctp/README.md | 18 + vendor/github.com/ishidawataru/sctp/sctp.go | 656 +++ .../ishidawataru/sctp/sctp_linux.go | 227 + .../ishidawataru/sctp/sctp_unsupported.go | 47 + vendor/github.com/moby/buildkit/AUTHORS | 56 + .../api/services/control/control.pb.go | 1649 ++++-- .../api/services/control/control.proto | 30 +- .../moby/buildkit/api/types/worker.pb.go | 129 +- .../buildkit/cache/contenthash/checksum.go | 186 +- .../buildkit/cache/contenthash/checksum.pb.go | 198 +- .../moby/buildkit/cache/remotecache/export.go | 38 +- .../moby/buildkit/cache/remotecache/import.go | 178 +- .../cache/remotecache/v1/cachestorage.go | 73 +- .../buildkit/cache/remotecache/v1/chains.go | 27 + .../moby/buildkit/cache/remotecache/v1/doc.go | 2 +- .../buildkit/cache/remotecache/v1/parse.go | 14 +- .../buildkit/cache/remotecache/v1/utils.go | 12 + .../moby/buildkit/client/client_windows.go | 2 +- .../github.com/moby/buildkit/client/graph.go | 1 + .../moby/buildkit/client/llb/state.go | 8 +- .../moby/buildkit/client/ociindex/ociindex.go | 113 + .../github.com/moby/buildkit/client/solve.go | 223 +- .../moby/buildkit/control/control.go | 123 +- .../moby/buildkit/executor/oci/mounts.go | 63 +- .../moby/buildkit/executor/oci/spec_unix.go | 32 +- .../executor/runcexecutor/executor.go | 6 +- .../exporter/containerimage/exptypes/types.go | 1 + .../exporter/containerimage/writer.go | 49 +- .../moby/buildkit/exporter/oci/export.go | 2 +- .../frontend/dockerfile/builder/build.go | 93 +- .../dockerfile/dockerfile2llb/convert.go | 26 +- .../dockerfile2llb/convert_secrets.go | 16 + .../dockerfile/dockerfile2llb/convert_ssh.go | 17 +- 
.../instructions/commands_runmount.go | 35 + .../frontend/dockerfile/parser/parser.go | 5 + .../moby/buildkit/frontend/frontend.go | 2 + .../frontend/gateway/client/client.go | 13 +- .../frontend/gateway/forwarder/forward.go | 8 +- .../moby/buildkit/frontend/gateway/gateway.go | 45 +- .../frontend/gateway/grpcclient/client.go | 22 +- .../moby/buildkit/frontend/gateway/pb/caps.go | 8 + .../frontend/gateway/pb/gateway.pb.go | 1551 +++++- .../frontend/gateway/pb/gateway.proto | 17 +- .../moby/buildkit/session/auth/auth.pb.go | 113 +- .../buildkit/session/content/attachable.go | 132 + .../moby/buildkit/session/content/caller.go | 84 + .../buildkit/session/filesync/filesync.pb.go | 87 +- .../buildkit/session/secrets/secrets.pb.go | 121 +- .../moby/buildkit/session/session.go | 4 +- .../buildkit/session/sshforward/ssh.pb.go | 154 +- .../snapshot/imagerefchecker/checker.go | 2 +- .../moby/buildkit/snapshot/snapshotter.go | 2 +- .../moby/buildkit/solver/cachemanager.go | 83 +- .../moby/buildkit/solver/cachestorage.go | 2 +- .../moby/buildkit/solver/combinedcache.go | 27 +- .../github.com/moby/buildkit/solver/edge.go | 41 +- .../buildkit/solver/internal/pipe/pipe.go | 2 - .../github.com/moby/buildkit/solver/jobs.go | 4 +- .../moby/buildkit/solver/llbsolver/bridge.go | 61 +- .../buildkit/solver/llbsolver/ops/exec.go | 40 +- .../buildkit/solver/llbsolver/ops/source.go | 16 +- .../moby/buildkit/solver/llbsolver/result.go | 2 +- .../moby/buildkit/solver/llbsolver/solver.go | 107 +- .../moby/buildkit/solver/llbsolver/vertex.go | 16 +- .../buildkit/solver/memorycachestorage.go | 4 +- .../moby/buildkit/solver/pb/ops.pb.go | 1028 +++- .../github.com/moby/buildkit/solver/types.go | 2 +- .../buildkit/source/containerimage/pull.go | 33 +- .../moby/buildkit/source/git/gitsource.go | 19 +- .../buildkit/source/git/gitsource_unix.go | 10 +- .../moby/buildkit/source/http/httpsource.go | 36 +- .../moby/buildkit/source/local/local.go | 11 +- .../moby/buildkit/source/manager.go | 7 +- .../moby/buildkit/util/apicaps/pb/caps.pb.go | 74 +- .../moby/buildkit/util/contentutil/copy.go | 2 +- .../moby/buildkit/util/imageutil/config.go | 12 +- .../moby/buildkit/util/pull/pull.go | 4 +- .../moby/buildkit/util/push/push.go | 2 +- .../moby/buildkit/worker/base/worker.go | 132 +- .../moby/buildkit/worker/cacheresult.go | 4 +- .../github.com/moby/buildkit/worker/worker.go | 7 +- .../github.com/mrunalp/fileutils/.gitignore | 1 + vendor/github.com/mrunalp/fileutils/LICENSE | 191 + .../github.com/mrunalp/fileutils/MAINTAINERS | 1 + vendor/github.com/mrunalp/fileutils/README.md | 5 + .../github.com/mrunalp/fileutils/fileutils.go | 158 + .../github.com/mrunalp/fileutils/idtools.go | 49 + .../go-digest/{LICENSE => LICENSE.code} | 0 .../opencontainers/go-digest/README.md | 2 +- .../runc/libcontainer/README.md | 330 ++ .../opencontainers/runc/libcontainer/SPEC.md | 452 ++ .../runc/libcontainer/apparmor/apparmor.go | 54 + .../apparmor/apparmor_disabled.go | 20 + .../runc/libcontainer/capabilities_linux.go | 113 + .../runc/libcontainer/cgroups/cgroups.go | 64 + .../cgroups/cgroups_unsupported.go | 3 + .../runc/libcontainer/cgroups/fs/apply_raw.go | 409 ++ .../runc/libcontainer/cgroups/fs/blkio.go | 237 + .../runc/libcontainer/cgroups/fs/cpu.go | 117 + .../runc/libcontainer/cgroups/fs/cpuacct.go | 121 + .../runc/libcontainer/cgroups/fs/cpuset.go | 159 + .../runc/libcontainer/cgroups/fs/devices.go | 80 + .../runc/libcontainer/cgroups/fs/freezer.go | 66 + .../libcontainer/cgroups/fs/fs_unsupported.go | 3 + 
.../runc/libcontainer/cgroups/fs/hugetlb.go | 71 + .../runc/libcontainer/cgroups/fs/kmem.go | 55 + .../libcontainer/cgroups/fs/kmem_disabled.go | 11 + .../runc/libcontainer/cgroups/fs/memory.go | 270 + .../runc/libcontainer/cgroups/fs/name.go | 40 + .../runc/libcontainer/cgroups/fs/net_cls.go | 43 + .../runc/libcontainer/cgroups/fs/net_prio.go | 41 + .../libcontainer/cgroups/fs/perf_event.go | 35 + .../runc/libcontainer/cgroups/fs/pids.go | 73 + .../runc/libcontainer/cgroups/fs/utils.go | 78 + .../runc/libcontainer/cgroups/stats.go | 108 + .../cgroups/systemd/apply_nosystemd.go | 55 + .../cgroups/systemd/apply_systemd.go | 604 +++ .../runc/libcontainer/cgroups/utils.go | 463 ++ .../runc/libcontainer/configs/blkio_device.go | 61 + .../runc/libcontainer/configs/cgroup_linux.go | 122 + .../libcontainer/configs/cgroup_windows.go | 6 + .../runc/libcontainer/configs/config.go | 356 ++ .../runc/libcontainer/configs/config_linux.go | 61 + .../runc/libcontainer/configs/device.go | 57 + .../libcontainer/configs/device_defaults.go | 111 + .../libcontainer/configs/hugepage_limit.go | 9 + .../runc/libcontainer/configs/intelrdt.go | 11 + .../configs/interface_priority_map.go | 14 + .../runc/libcontainer/configs/mount.go | 39 + .../runc/libcontainer/configs/namespaces.go | 5 + .../libcontainer/configs/namespaces_linux.go | 126 + .../configs/namespaces_syscall.go | 32 + .../configs/namespaces_syscall_unsupported.go | 13 + .../configs/namespaces_unsupported.go | 8 + .../runc/libcontainer/configs/network.go | 72 + .../libcontainer/configs/validate/rootless.go | 89 + .../configs/validate/validator.go | 245 + .../runc/libcontainer/console_linux.go | 41 + .../runc/libcontainer/container.go | 166 + .../runc/libcontainer/container_linux.go | 1896 +++++++ .../runc/libcontainer/criu_opts_linux.go | 40 + .../runc/libcontainer/criurpc/Makefile | 2 + .../runc/libcontainer/criurpc/criurpc.pb.go | 1178 +++++ .../runc/libcontainer/criurpc/criurpc.proto | 209 + .../opencontainers/runc/libcontainer/error.go | 70 + .../runc/libcontainer/factory.go | 44 + .../runc/libcontainer/factory_linux.go | 396 ++ .../runc/libcontainer/generic_error.go | 92 + .../runc/libcontainer/init_linux.go | 536 ++ .../runc/libcontainer/intelrdt/intelrdt.go | 730 +++ .../runc/libcontainer/intelrdt/stats.go | 40 + .../runc/libcontainer/keys/keyctl.go | 48 + .../runc/libcontainer/message_linux.go | 89 + .../runc/libcontainer/mount/mount.go | 23 + .../runc/libcontainer/mount/mount_linux.go | 82 + .../runc/libcontainer/mount/mountinfo.go | 40 + .../runc/libcontainer/network_linux.go | 102 + .../runc/libcontainer/notify_linux.go | 90 + .../runc/libcontainer/nsenter/README.md | 44 + .../runc/libcontainer/nsenter/namespace.h | 32 + .../runc/libcontainer/nsenter/nsenter.go | 12 + .../libcontainer/nsenter/nsenter_gccgo.go | 25 + .../nsenter/nsenter_unsupported.go | 5 + .../runc/libcontainer/nsenter/nsexec.c | 995 ++++ .../runc/libcontainer/process.go | 113 + .../runc/libcontainer/process_linux.go | 571 +++ .../runc/libcontainer/restored_process.go | 122 + .../runc/libcontainer/rootfs_linux.go | 897 ++++ .../runc/libcontainer/seccomp/config.go | 76 + .../libcontainer/seccomp/seccomp_linux.go | 258 + .../seccomp/seccomp_unsupported.go | 24 + .../runc/libcontainer/setns_init_linux.go | 88 + .../runc/libcontainer/specconv/example.go | 221 + .../runc/libcontainer/specconv/spec_linux.go | 837 ++++ .../runc/libcontainer/stacktrace/capture.go | 27 + .../runc/libcontainer/stacktrace/frame.go | 38 + .../libcontainer/stacktrace/stacktrace.go | 5 + 
.../runc/libcontainer/standard_init_linux.go | 210 + .../runc/libcontainer/state_linux.go | 255 + .../opencontainers/runc/libcontainer/stats.go | 15 + .../runc/libcontainer/stats_linux.go | 10 + .../opencontainers/runc/libcontainer/sync.go | 104 + .../runc/libcontainer/utils/cmsg.go | 93 + .../runc/libcontainer/utils/utils.go | 112 + .../runc/libcontainer/utils/utils_unix.go | 44 + .../github.com/opencontainers/selinux/LICENSE | 201 + .../selinux/go-selinux/label/label.go | 101 + .../selinux/go-selinux/label/label_selinux.go | 224 + .../selinux/go-selinux/selinux_linux.go | 743 +++ .../selinux/go-selinux/selinux_stub.go | 206 + .../selinux/go-selinux/xattrs.go | 78 + vendor/github.com/pkg/errors/.travis.yml | 12 +- vendor/github.com/pkg/errors/README.md | 4 +- vendor/github.com/pkg/errors/errors.go | 43 +- vendor/github.com/pkg/errors/stack.go | 51 +- .../seccomp/libseccomp-golang/.gitignore | 4 + .../seccomp/libseccomp-golang/CHANGELOG | 6 + .../seccomp/libseccomp-golang/LICENSE | 22 + .../seccomp/libseccomp-golang/Makefile | 26 + .../seccomp/libseccomp-golang/README | 51 + .../libseccomp-golang/SUBMITTING_PATCHES | 112 + .../seccomp/libseccomp-golang/seccomp.go | 902 ++++ .../libseccomp-golang/seccomp_internal.go | 514 ++ .../github.com/tonistiigi/fsutil/.travis.yml | 8 +- vendor/github.com/tonistiigi/fsutil/go.mod | 27 + vendor/github.com/tonistiigi/fsutil/go.sum | 70 + vendor/github.com/urfave/cli/.flake8 | 2 + vendor/github.com/urfave/cli/.gitignore | 2 + vendor/github.com/urfave/cli/.travis.yml | 27 + vendor/github.com/urfave/cli/CHANGELOG.md | 435 ++ vendor/github.com/urfave/cli/LICENSE | 21 + vendor/github.com/urfave/cli/README.md | 1391 ++++++ vendor/github.com/urfave/cli/app.go | 509 ++ vendor/github.com/urfave/cli/appveyor.yml | 26 + vendor/github.com/urfave/cli/category.go | 44 + vendor/github.com/urfave/cli/cli.go | 22 + vendor/github.com/urfave/cli/command.go | 304 ++ vendor/github.com/urfave/cli/context.go | 278 ++ vendor/github.com/urfave/cli/errors.go | 115 + vendor/github.com/urfave/cli/flag-types.json | 93 + vendor/github.com/urfave/cli/flag.go | 807 +++ .../github.com/urfave/cli/flag_generated.go | 627 +++ vendor/github.com/urfave/cli/funcs.go | 41 + .../github.com/urfave/cli/generate-flag-types | 255 + vendor/github.com/urfave/cli/help.go | 339 ++ vendor/github.com/urfave/cli/runtests | 122 + .../vishvananda/netlink/.travis.yml | 13 + .../vishvananda/netlink/CHANGELOG.md | 5 + vendor/github.com/vishvananda/netlink/LICENSE | 192 + .../github.com/vishvananda/netlink/Makefile | 30 + .../github.com/vishvananda/netlink/README.md | 92 + vendor/github.com/vishvananda/netlink/addr.go | 56 + .../vishvananda/netlink/addr_linux.go | 354 ++ .../vishvananda/netlink/bpf_linux.go | 53 + .../vishvananda/netlink/bridge_linux.go | 115 + .../github.com/vishvananda/netlink/class.go | 78 + .../vishvananda/netlink/class_linux.go | 255 + .../vishvananda/netlink/conntrack_linux.go | 371 ++ .../netlink/conntrack_unspecified.go | 53 + .../github.com/vishvananda/netlink/filter.go | 288 ++ .../vishvananda/netlink/filter_linux.go | 639 +++ vendor/github.com/vishvananda/netlink/fou.go | 21 + .../vishvananda/netlink/fou_linux.go | 215 + .../vishvananda/netlink/fou_unspecified.go | 15 + .../vishvananda/netlink/genetlink_linux.go | 168 + .../netlink/genetlink_unspecified.go | 25 + .../vishvananda/netlink/gtp_linux.go | 239 + .../vishvananda/netlink/handle_linux.go | 144 + .../vishvananda/netlink/handle_unspecified.go | 258 + .../vishvananda/netlink/ioctl_linux.go | 98 + 
vendor/github.com/vishvananda/netlink/link.go | 846 ++++ .../vishvananda/netlink/link_linux.go | 2354 +++++++++ .../vishvananda/netlink/link_tuntap_linux.go | 14 + .../github.com/vishvananda/netlink/neigh.go | 25 + .../vishvananda/netlink/neigh_linux.go | 289 ++ .../github.com/vishvananda/netlink/netlink.go | 39 + .../vishvananda/netlink/netlink_linux.go | 11 + .../netlink/netlink_unspecified.go | 225 + .../vishvananda/netlink/nl/addr_linux.go | 77 + .../vishvananda/netlink/nl/bridge_linux.go | 74 + .../vishvananda/netlink/nl/conntrack_linux.go | 189 + .../vishvananda/netlink/nl/genetlink_linux.go | 89 + .../vishvananda/netlink/nl/link_linux.go | 548 ++ .../vishvananda/netlink/nl/mpls_linux.go | 36 + .../vishvananda/netlink/nl/nl_linux.go | 738 +++ .../vishvananda/netlink/nl/nl_unspecified.go | 11 + .../vishvananda/netlink/nl/route_linux.go | 81 + .../vishvananda/netlink/nl/seg6_linux.go | 111 + .../vishvananda/netlink/nl/syscall.go | 78 + .../vishvananda/netlink/nl/tc_linux.go | 710 +++ .../vishvananda/netlink/nl/xfrm_linux.go | 296 ++ .../netlink/nl/xfrm_monitor_linux.go | 32 + .../netlink/nl/xfrm_policy_linux.go | 119 + .../netlink/nl/xfrm_state_linux.go | 334 ++ .../github.com/vishvananda/netlink/order.go | 32 + .../vishvananda/netlink/protinfo.go | 58 + .../vishvananda/netlink/protinfo_linux.go | 75 + .../github.com/vishvananda/netlink/qdisc.go | 292 ++ .../vishvananda/netlink/qdisc_linux.go | 647 +++ .../github.com/vishvananda/netlink/route.go | 178 + .../vishvananda/netlink/route_linux.go | 878 ++++ .../vishvananda/netlink/route_unspecified.go | 11 + vendor/github.com/vishvananda/netlink/rule.go | 42 + .../vishvananda/netlink/rule_linux.go | 234 + .../github.com/vishvananda/netlink/socket.go | 27 + .../vishvananda/netlink/socket_linux.go | 159 + vendor/github.com/vishvananda/netlink/xfrm.go | 75 + .../vishvananda/netlink/xfrm_monitor_linux.go | 97 + .../vishvananda/netlink/xfrm_policy.go | 74 + .../vishvananda/netlink/xfrm_policy_linux.go | 256 + .../vishvananda/netlink/xfrm_state.go | 129 + .../vishvananda/netlink/xfrm_state_linux.go | 457 ++ vendor/github.com/vishvananda/netns/LICENSE | 192 + vendor/github.com/vishvananda/netns/README.md | 51 + vendor/github.com/vishvananda/netns/netns.go | 80 + .../vishvananda/netns/netns_linux.go | 230 + .../vishvananda/netns/netns_unspecified.go | 43 + .../x/crypto/ssh/terminal/terminal.go | 4 + .../golang.org/x/crypto/ssh/terminal/util.go | 4 +- .../x/crypto/ssh/terminal/util_aix.go | 12 + .../x/crypto/ssh/terminal/util_plan9.go | 2 +- .../x/crypto/ssh/terminal/util_solaris.go | 2 +- .../x/crypto/ssh/terminal/util_windows.go | 2 +- .../golang.org/x/sync/semaphore/semaphore.go | 131 + vendor/modules.txt | 95 +- 704 files changed, 82301 insertions(+), 7250 deletions(-) create mode 100644 vendor/github.com/Microsoft/hcsshim/.gometalinter.json create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/attachment.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/battery.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/cache_query_stats_response.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/chipset.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/close_handle.go create mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/schema2/com_port.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/compute_system.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/configuration.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/console_size.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/container.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_state.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/container_memory_information.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/enhanced_mode_video.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/flexible_io_device.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection_info.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_crash_reporting.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_os.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_state.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/hosted_system.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_2.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_system_config.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/keyboard.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/layer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/linux_kernel_direct.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_directory.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_pipe.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_information_for_vm.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_stats.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/modify_setting_request.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/mouse.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/networking.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_notification.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_options.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/process_details.go create mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/schema2/process_modify_request.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/process_parameters.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/process_status.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/processor.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_2.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_stats.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/properties.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/property_query.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/rdp_connection_options.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_changes.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_key.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_value.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/restore_state.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/save_options.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/scsi.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_configuration.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region_info.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/silo_properties.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/statistics.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/storage.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_qo_s.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_stats.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/topology.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi_boot_entry.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/version.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/video_monitor.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_node_info.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_controller.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_device.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share_options.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_memory.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/windows_crash_reporting.go create mode 100644 vendor/github.com/Microsoft/hcsshim/vendor.conf delete mode 100644 vendor/github.com/Microsoft/hcsshim/version.go delete mode 100644 vendor/github.com/Nvveen/Gotty/LICENSE delete mode 100644 
vendor/github.com/Nvveen/Gotty/README delete mode 100644 vendor/github.com/Nvveen/Gotty/TODO delete mode 100644 vendor/github.com/Nvveen/Gotty/attributes.go delete mode 100644 vendor/github.com/Nvveen/Gotty/gotty.go delete mode 100644 vendor/github.com/Nvveen/Gotty/parser.go delete mode 100644 vendor/github.com/Nvveen/Gotty/types.go create mode 100644 vendor/github.com/checkpoint-restore/go-criu/LICENSE create mode 100644 vendor/github.com/checkpoint-restore/go-criu/rpc/rpc.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/events/container.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/events/container.proto create mode 100644 vendor/github.com/containerd/containerd/api/events/content.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/events/content.proto create mode 100644 vendor/github.com/containerd/containerd/api/events/doc.go create mode 100644 vendor/github.com/containerd/containerd/api/events/image.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/events/image.proto create mode 100644 vendor/github.com/containerd/containerd/api/events/namespace.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/events/namespace.proto create mode 100644 vendor/github.com/containerd/containerd/api/events/snapshot.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/events/snapshot.proto create mode 100644 vendor/github.com/containerd/containerd/api/events/task.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/events/task.proto create mode 100644 vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/services/content/v1/content.proto create mode 100644 vendor/github.com/containerd/containerd/api/types/descriptor.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/types/descriptor.proto create mode 100644 vendor/github.com/containerd/containerd/api/types/doc.go create mode 100644 vendor/github.com/containerd/containerd/api/types/metrics.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/types/metrics.proto create mode 100644 vendor/github.com/containerd/containerd/api/types/mount.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/types/mount.proto create mode 100644 vendor/github.com/containerd/containerd/api/types/platform.pb.go create mode 100644 vendor/github.com/containerd/containerd/api/types/platform.proto create mode 100644 vendor/github.com/containerd/containerd/content/proxy/content_reader.go create mode 100644 vendor/github.com/containerd/containerd/content/proxy/content_store.go create mode 100644 vendor/github.com/containerd/containerd/content/proxy/content_writer.go rename vendor/github.com/containerd/containerd/mount/{mountinfo_freebsd.go => mountinfo_bsd.go} (98%) create mode 100644 vendor/github.com/containerd/containerd/oci/spec_opts_windows.go create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/authorizer.go create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/converter.go create mode 100644 vendor/github.com/containerd/containerd/services/content/service.go create mode 100644 vendor/github.com/containerd/containerd/services/content/store.go create mode 100644 vendor/github.com/containerd/containerd/services/services.go rename vendor/github.com/containerd/containerd/{archive/time_darwin.go => version/version.go} (63%) create mode 100644 
vendor/github.com/coreos/go-systemd/LICENSE create mode 100644 vendor/github.com/coreos/go-systemd/NOTICE create mode 100644 vendor/github.com/coreos/go-systemd/activation/files.go create mode 100644 vendor/github.com/coreos/go-systemd/activation/listeners.go create mode 100644 vendor/github.com/coreos/go-systemd/activation/packetconns.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/dbus.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/methods.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/properties.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/set.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/subscription.go create mode 100644 vendor/github.com/coreos/go-systemd/dbus/subscription_set.go create mode 100644 vendor/github.com/coreos/go-systemd/util/util.go create mode 100644 vendor/github.com/coreos/go-systemd/util/util_cgo.go create mode 100644 vendor/github.com/coreos/go-systemd/util/util_stub.go create mode 100644 vendor/github.com/coreos/pkg/LICENSE create mode 100644 vendor/github.com/coreos/pkg/NOTICE create mode 100644 vendor/github.com/coreos/pkg/dlopen/dlopen.go create mode 100644 vendor/github.com/coreos/pkg/dlopen/dlopen_example.go create mode 100644 vendor/github.com/docker/cli/cli/config/types/authconfig.go delete mode 100644 vendor/github.com/docker/cli/opts/config.go delete mode 100644 vendor/github.com/docker/cli/opts/duration.go delete mode 100644 vendor/github.com/docker/cli/opts/env.go delete mode 100644 vendor/github.com/docker/cli/opts/envfile.go delete mode 100644 vendor/github.com/docker/cli/opts/file.go delete mode 100644 vendor/github.com/docker/cli/opts/hosts.go delete mode 100644 vendor/github.com/docker/cli/opts/hosts_unix.go delete mode 100644 vendor/github.com/docker/cli/opts/hosts_windows.go delete mode 100644 vendor/github.com/docker/cli/opts/ip.go delete mode 100644 vendor/github.com/docker/cli/opts/mount.go delete mode 100644 vendor/github.com/docker/cli/opts/network.go delete mode 100644 vendor/github.com/docker/cli/opts/opts.go delete mode 100644 vendor/github.com/docker/cli/opts/opts_unix.go delete mode 100644 vendor/github.com/docker/cli/opts/opts_windows.go delete mode 100644 vendor/github.com/docker/cli/opts/parse.go delete mode 100644 vendor/github.com/docker/cli/opts/port.go delete mode 100644 vendor/github.com/docker/cli/opts/quotedstring.go delete mode 100644 vendor/github.com/docker/cli/opts/runtime.go delete mode 100644 vendor/github.com/docker/cli/opts/secret.go delete mode 100644 vendor/github.com/docker/cli/opts/throttledevice.go delete mode 100644 vendor/github.com/docker/cli/opts/ulimit.go delete mode 100644 vendor/github.com/docker/cli/opts/weightdevice.go delete mode 100644 vendor/github.com/docker/distribution/AUTHORS delete mode 100644 vendor/github.com/docker/distribution/CHANGELOG.md delete mode 100644 vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md create mode 100644 vendor/github.com/docker/docker/pkg/mount/unmount_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_linux_mipsx.go create mode 100644 vendor/github.com/godbus/dbus/.travis.yml create mode 100644 vendor/github.com/godbus/dbus/CONTRIBUTING.md create mode 100644 vendor/github.com/godbus/dbus/LICENSE create mode 100644 vendor/github.com/godbus/dbus/MAINTAINERS create mode 100644 vendor/github.com/godbus/dbus/README.markdown create mode 100644 
vendor/github.com/godbus/dbus/auth.go create mode 100644 vendor/github.com/godbus/dbus/auth_external.go create mode 100644 vendor/github.com/godbus/dbus/auth_sha1.go create mode 100644 vendor/github.com/godbus/dbus/call.go create mode 100644 vendor/github.com/godbus/dbus/conn.go create mode 100644 vendor/github.com/godbus/dbus/conn_darwin.go create mode 100644 vendor/github.com/godbus/dbus/conn_other.go create mode 100644 vendor/github.com/godbus/dbus/dbus.go create mode 100644 vendor/github.com/godbus/dbus/decoder.go create mode 100644 vendor/github.com/godbus/dbus/default_handler.go create mode 100644 vendor/github.com/godbus/dbus/doc.go create mode 100644 vendor/github.com/godbus/dbus/encoder.go create mode 100644 vendor/github.com/godbus/dbus/export.go create mode 100644 vendor/github.com/godbus/dbus/homedir.go create mode 100644 vendor/github.com/godbus/dbus/homedir_dynamic.go create mode 100644 vendor/github.com/godbus/dbus/homedir_static.go create mode 100644 vendor/github.com/godbus/dbus/message.go create mode 100644 vendor/github.com/godbus/dbus/object.go create mode 100644 vendor/github.com/godbus/dbus/server_interfaces.go create mode 100644 vendor/github.com/godbus/dbus/sig.go create mode 100644 vendor/github.com/godbus/dbus/transport_darwin.go create mode 100644 vendor/github.com/godbus/dbus/transport_generic.go create mode 100644 vendor/github.com/godbus/dbus/transport_tcp.go create mode 100644 vendor/github.com/godbus/dbus/transport_unix.go create mode 100644 vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go create mode 100644 vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go create mode 100644 vendor/github.com/godbus/dbus/transport_unixcred_linux.go create mode 100644 vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go create mode 100644 vendor/github.com/godbus/dbus/variant.go create mode 100644 vendor/github.com/godbus/dbus/variant_lexer.go create mode 100644 vendor/github.com/godbus/dbus/variant_parser.go create mode 100644 vendor/github.com/gofrs/flock/.gitignore create mode 100644 vendor/github.com/gofrs/flock/.travis.yml create mode 100644 vendor/github.com/gofrs/flock/LICENSE create mode 100644 vendor/github.com/gofrs/flock/README.md create mode 100644 vendor/github.com/gofrs/flock/appveyor.yml create mode 100644 vendor/github.com/gofrs/flock/flock.go create mode 100644 vendor/github.com/gofrs/flock/flock_unix.go create mode 100644 vendor/github.com/gofrs/flock/flock_winapi.go create mode 100644 vendor/github.com/gofrs/flock/flock_windows.go create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers.go create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/types/protosize.go create mode 100644 vendor/github.com/gogo/protobuf/types/wrappers_gogo.go create mode 100644 vendor/github.com/ishidawataru/sctp/.gitignore create mode 100644 vendor/github.com/ishidawataru/sctp/.travis.yml create mode 100644 vendor/github.com/ishidawataru/sctp/LICENSE create mode 100644 vendor/github.com/ishidawataru/sctp/README.md create mode 100644 vendor/github.com/ishidawataru/sctp/sctp.go create mode 100644 vendor/github.com/ishidawataru/sctp/sctp_linux.go create mode 100644 vendor/github.com/ishidawataru/sctp/sctp_unsupported.go create mode 100644 vendor/github.com/moby/buildkit/AUTHORS create mode 100644 vendor/github.com/moby/buildkit/client/ociindex/ociindex.go create mode 100644 vendor/github.com/moby/buildkit/session/content/attachable.go create mode 100644 
vendor/github.com/moby/buildkit/session/content/caller.go create mode 100644 vendor/github.com/mrunalp/fileutils/.gitignore create mode 100644 vendor/github.com/mrunalp/fileutils/LICENSE create mode 100644 vendor/github.com/mrunalp/fileutils/MAINTAINERS create mode 100644 vendor/github.com/mrunalp/fileutils/README.md create mode 100644 vendor/github.com/mrunalp/fileutils/fileutils.go create mode 100644 vendor/github.com/mrunalp/fileutils/idtools.go rename vendor/github.com/opencontainers/go-digest/{LICENSE => LICENSE.code} (100%) create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/README.md create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/SPEC.md create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_disabled.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/kmem.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/kmem_disabled.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go create mode 100644 
vendor/github.com/opencontainers/runc/libcontainer/configs/config.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/device.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/network.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/validate/rootless.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criurpc/Makefile create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.pb.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.proto create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/error.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/factory.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/generic_error.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/init_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/intelrdt/intelrdt.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/intelrdt/stats.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/message_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/mount/mount.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/mount/mount_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/mount/mountinfo.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/network_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/namespace.h create mode 100644 
vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/process.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/process_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/restored_process.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/specconv/example.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/specconv/spec_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stacktrace/capture.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stacktrace/frame.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stacktrace/stacktrace.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/state_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/sync.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go create mode 100644 vendor/github.com/opencontainers/selinux/LICENSE create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go create mode 100644 vendor/github.com/seccomp/libseccomp-golang/.gitignore create mode 100644 vendor/github.com/seccomp/libseccomp-golang/CHANGELOG create mode 100644 vendor/github.com/seccomp/libseccomp-golang/LICENSE create mode 100644 vendor/github.com/seccomp/libseccomp-golang/Makefile create mode 100644 vendor/github.com/seccomp/libseccomp-golang/README create mode 100644 vendor/github.com/seccomp/libseccomp-golang/SUBMITTING_PATCHES create mode 100644 vendor/github.com/seccomp/libseccomp-golang/seccomp.go create mode 100644 vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go create mode 100644 vendor/github.com/tonistiigi/fsutil/go.mod create mode 100644 vendor/github.com/tonistiigi/fsutil/go.sum create mode 100644 vendor/github.com/urfave/cli/.flake8 create mode 100644 vendor/github.com/urfave/cli/.gitignore 
create mode 100644 vendor/github.com/urfave/cli/.travis.yml create mode 100644 vendor/github.com/urfave/cli/CHANGELOG.md create mode 100644 vendor/github.com/urfave/cli/LICENSE create mode 100644 vendor/github.com/urfave/cli/README.md create mode 100644 vendor/github.com/urfave/cli/app.go create mode 100644 vendor/github.com/urfave/cli/appveyor.yml create mode 100644 vendor/github.com/urfave/cli/category.go create mode 100644 vendor/github.com/urfave/cli/cli.go create mode 100644 vendor/github.com/urfave/cli/command.go create mode 100644 vendor/github.com/urfave/cli/context.go create mode 100644 vendor/github.com/urfave/cli/errors.go create mode 100644 vendor/github.com/urfave/cli/flag-types.json create mode 100644 vendor/github.com/urfave/cli/flag.go create mode 100644 vendor/github.com/urfave/cli/flag_generated.go create mode 100644 vendor/github.com/urfave/cli/funcs.go create mode 100644 vendor/github.com/urfave/cli/generate-flag-types create mode 100644 vendor/github.com/urfave/cli/help.go create mode 100644 vendor/github.com/urfave/cli/runtests create mode 100644 vendor/github.com/vishvananda/netlink/.travis.yml create mode 100644 vendor/github.com/vishvananda/netlink/CHANGELOG.md create mode 100644 vendor/github.com/vishvananda/netlink/LICENSE create mode 100644 vendor/github.com/vishvananda/netlink/Makefile create mode 100644 vendor/github.com/vishvananda/netlink/README.md create mode 100644 vendor/github.com/vishvananda/netlink/addr.go create mode 100644 vendor/github.com/vishvananda/netlink/addr_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/bpf_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/bridge_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/class.go create mode 100644 vendor/github.com/vishvananda/netlink/class_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/conntrack_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/conntrack_unspecified.go create mode 100644 vendor/github.com/vishvananda/netlink/filter.go create mode 100644 vendor/github.com/vishvananda/netlink/filter_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/fou.go create mode 100644 vendor/github.com/vishvananda/netlink/fou_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/fou_unspecified.go create mode 100644 vendor/github.com/vishvananda/netlink/genetlink_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/genetlink_unspecified.go create mode 100644 vendor/github.com/vishvananda/netlink/gtp_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/handle_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/handle_unspecified.go create mode 100644 vendor/github.com/vishvananda/netlink/ioctl_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/link.go create mode 100644 vendor/github.com/vishvananda/netlink/link_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/link_tuntap_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/neigh.go create mode 100644 vendor/github.com/vishvananda/netlink/neigh_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/netlink.go create mode 100644 vendor/github.com/vishvananda/netlink/netlink_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/netlink_unspecified.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/addr_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/bridge_linux.go create mode 100644 
vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/genetlink_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/link_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/mpls_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/nl_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/nl_unspecified.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/route_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/seg6_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/syscall.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/tc_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/xfrm_monitor_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/order.go create mode 100644 vendor/github.com/vishvananda/netlink/protinfo.go create mode 100644 vendor/github.com/vishvananda/netlink/protinfo_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/qdisc.go create mode 100644 vendor/github.com/vishvananda/netlink/qdisc_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/route.go create mode 100644 vendor/github.com/vishvananda/netlink/route_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/route_unspecified.go create mode 100644 vendor/github.com/vishvananda/netlink/rule.go create mode 100644 vendor/github.com/vishvananda/netlink/rule_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/socket.go create mode 100644 vendor/github.com/vishvananda/netlink/socket_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/xfrm.go create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_policy.go create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_state.go create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_state_linux.go create mode 100644 vendor/github.com/vishvananda/netns/LICENSE create mode 100644 vendor/github.com/vishvananda/netns/README.md create mode 100644 vendor/github.com/vishvananda/netns/netns.go create mode 100644 vendor/github.com/vishvananda/netns/netns_linux.go create mode 100644 vendor/github.com/vishvananda/netns/netns_unspecified.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_aix.go create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go diff --git a/Dockerfile b/Dockerfile index 8725df4b1..03b1b34d2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ -ARG RUNC_VERSION=9f9c96235cc97674e935002fc3d78361b696a69e -FROM golang:1.10-alpine AS gobuild-base +ARG RUNC_VERSION=7cb3cde1f49eae53fb8fff5012c0750a64eb928b +FROM golang:1.11-alpine AS gobuild-base RUN apk add --no-cache \ bash \ build-base \ @@ -13,6 +13,7 @@ FROM gobuild-base AS runc ARG RUNC_VERSION RUN git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ && cd "$GOPATH/src/github.com/opencontainers/runc" \ + && git checkout $RUNC_VERSION \ && make static BUILDTAGS="seccomp" EXTRA_FLAGS="-buildmode pie" EXTRA_LDFLAGS="-extldflags \\\"-fno-PIC -static\\\"" \ && mv runc 
/usr/bin/runc @@ -51,10 +52,12 @@ RUN chmod u+s /usr/bin/newuidmap /usr/bin/newgidmap \ && mkdir -p /run/user/1000 \ && chown -R user /run/user/1000 /home/user \ && echo user:100000:65536 | tee /etc/subuid | tee /etc/subgid -# As of v3.8.1, Alpine does not set SUID bit on the busybox version of /bin/su. -# However, future version may set SUID bit on /bin/su. -# We lock the root account so as to disable su completely. -RUN passwd -l root +# In previous versions of the `alpine:3.8` image, the root account was not locked, so +# `su` worked without any password when the SUID bit was set on `/bin/su`. +# +# As of 3/15/2019, the root account is locked by default, but we explicitly +# lock it just in case. +RUN passwd -l root || true FROM base AS debug RUN apk add --no-cache bash strace diff --git a/Makefile b/Makefile index f144763d8..d7246ba18 100644 --- a/Makefile +++ b/Makefile @@ -13,8 +13,10 @@ include basic.mk prebuild: runc RUNCBUILDDIR=$(BUILDDIR)/src/github.com/opencontainers/runc +RUNCCOMMIT=7cb3cde1f49eae53fb8fff5012c0750a64eb928b $(RUNCBUILDDIR): - git clone --depth 1 https://github.com/opencontainers/runc.git "$@" + git clone https://github.com/opencontainers/runc.git "$@" + ( cd "$@" ; git checkout $(RUNCCOMMIT) ) $(RUNCBUILDDIR)/runc: $(RUNCBUILDDIR) GOPATH=$(BUILDDIR) $(MAKE) -C "$(RUNCBUILDDIR)" static BUILDTAGS="seccomp apparmor" diff --git a/client/pull.go b/client/pull.go index 69c3a4092..211e3e8c3 100644 --- a/client/pull.go +++ b/client/pull.go @@ -15,6 +15,10 @@ import ( // Pull retrieves an image from a remote registry. func (c *Client) Pull(ctx context.Context, image string) (*ListedImage, error) { + sm, err := c.getSessionManager() + if err != nil { + return nil, err + } // Parse the image name and tag. named, err := reference.ParseNormalizedNamed(image) if err != nil { @@ -46,18 +50,17 @@ func (c *Client) Pull(ctx context.Context, image string) (*ListedImage, error) { // Create the source for the pull.
srcOpt := containerimage.SourceOpt{ - SessionManager: opt.SessionManager, - Snapshotter: opt.Snapshotter, - ContentStore: opt.ContentStore, - Applier: opt.Applier, - CacheAccessor: cm, - ImageStore: opt.ImageStore, + Snapshotter: opt.Snapshotter, + ContentStore: opt.ContentStore, + Applier: opt.Applier, + CacheAccessor: cm, + ImageStore: opt.ImageStore, } src, err := containerimage.NewSource(srcOpt) if err != nil { return nil, err } - s, err := src.Resolve(ctx, identifier) + s, err := src.Resolve(ctx, identifier, sm) if err != nil { return nil, err } @@ -76,9 +79,8 @@ func (c *Client) Pull(ctx context.Context, image string) (*ListedImage, error) { return nil, err } expOpt := imageexporter.Opt{ - SessionManager: opt.SessionManager, - Images: opt.ImageStore, - ImageWriter: iw, + Images: opt.ImageStore, + ImageWriter: iw, } exp, err := imageexporter.New(expOpt) if err != nil { diff --git a/client/push.go b/client/push.go index e94e874f7..501f2e8ea 100644 --- a/client/push.go +++ b/client/push.go @@ -30,5 +30,9 @@ func (c *Client) Push(ctx context.Context, image string, insecure bool) error { return fmt.Errorf("getting image %q failed: %v", image, err) } - return push.Push(ctx, opt.SessionManager, opt.ContentStore, imgObj.Target.Digest, image, insecure, opt.ResolveOptionsFunc) + sm, err := c.getSessionManager() + if err != nil { + return err + } + return push.Push(ctx, sm, opt.ContentStore, imgObj.Target.Digest, image, insecure, opt.ResolveOptionsFunc) } diff --git a/client/workeropt.go b/client/workeropt.go index 921cbf2a2..7c284c52b 100644 --- a/client/workeropt.go +++ b/client/workeropt.go @@ -31,10 +31,6 @@ import ( // createWorkerOpt creates a base.WorkerOpt to be used for a new worker. func (c *Client) createWorkerOpt(withExecutor bool) (opt base.WorkerOpt, err error) { - sm, err := c.getSessionManager() - if err != nil { - return opt, err - } // Create the metadata store. 
md, err := metadata.NewStore(filepath.Join(c.root, "metadata.db")) if err != nil { @@ -121,7 +117,6 @@ func (c *Client) createWorkerOpt(withExecutor bool) (opt base.WorkerOpt, err err opt = base.WorkerOpt{ ID: id, Labels: xlabels, - SessionManager: sm, MetadataStore: md, Executor: exe, Snapshotter: containerdsnapshot.NewSnapshotter(mdb.Snapshotter(c.backend), contentStore, md, "buildkit", gc), diff --git a/go.mod b/go.mod index 9bf73b541..4cdc76eec 100644 --- a/go.mod +++ b/go.mod @@ -3,51 +3,41 @@ module github.com/genuinetools/img replace github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe require ( - github.com/BurntSushi/toml v0.3.1 // indirect - github.com/Microsoft/hcsshim v0.7.4 // indirect - github.com/containerd/cgroups v0.0.0-20180917172123-5017d4e9a9cf // indirect + github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 - github.com/containerd/containerd v0.0.0-20180925133938-3bc4ba271e1c - github.com/containerd/cri v1.11.1 // indirect - github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260 // indirect + github.com/containerd/containerd v1.3.0-0.20190212172151-f5b0fa220df8 github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3 - github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd // indirect - github.com/coreos/go-systemd v0.0.0-20180828140353-eee3db372b31 // indirect + github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142 + github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect github.com/cyphar/filepath-securejoin v0.2.2 - github.com/docker/cli v0.0.0-20180920165730-54c19e67f69c - github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f - github.com/docker/docker v0.0.0-20180924202107-a9c061deec0f - github.com/docker/docker-ce v17.12.1-ce-rc2+incompatible // indirect + github.com/docker/cli v0.0.0-20190131223713-234462756460 + github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible + github.com/docker/docker v0.7.3-0.20190315021241-05e7d000f2fc github.com/docker/go-connections v0.4.0 // indirect - github.com/docker/go-events v0.0.0-20170721190031-9461782956ad // indirect github.com/docker/go-units v0.3.3 - github.com/docker/libnetwork v0.0.0-20171208192558-822e5b59d346 // indirect github.com/genuinetools/pkg v0.0.0-20180910213200-1c141f661797 github.com/genuinetools/reg v0.16.0 - github.com/godbus/dbus v4.1.0+incompatible // indirect + github.com/godbus/dbus v4.1.0+incompatible github.com/gogo/googleapis v1.1.0 // indirect - github.com/google/shlex v0.0.0-20150127133951-6f45313302b9 // indirect - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect - github.com/hashicorp/go-immutable-radix v1.0.0 // indirect + github.com/golang/protobuf v1.2.0 github.com/hashicorp/golang-lru v0.5.0 // indirect - github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c // indirect github.com/mitchellh/hashstructure v1.0.0 // indirect - github.com/moby/buildkit v0.3.2 - github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect + github.com/moby/buildkit v0.4.0 + github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 github.com/opencontainers/image-spec v1.0.1 github.com/opencontainers/runc v1.0.0-rc2.0.20181113215238-10d38b660a77 - github.com/opencontainers/runtime-spec v1.0.1 // indirect + github.com/opencontainers/runtime-spec 
v1.0.1 + github.com/opencontainers/selinux v1.0.0 github.com/opentracing-contrib/go-stdlib v0.0.0-20180702182724-07a764486eb1 // indirect github.com/opentracing/opentracing-go v1.0.2 // indirect - github.com/pkg/errors v0.8.0 + github.com/pkg/errors v0.8.1 + github.com/seccomp/libseccomp-golang v0.9.0 github.com/sirupsen/logrus v1.0.6 - github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect - github.com/tonistiigi/fsutil v0.0.0-20181011223333-2862f6bc5ac9 // indirect - github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect - github.com/vishvananda/netlink v1.0.0 // indirect - github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect + github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 + github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 + github.com/vishvananda/netlink v1.0.0 go.etcd.io/bbolt v1.3.1-etcd.8 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f + golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e google.golang.org/grpc v1.15.0 ) diff --git a/go.sum b/go.sum index 6c58653ab..f160cd789 100644 --- a/go.sum +++ b/go.sum @@ -5,23 +5,28 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/hcsshim v0.7.4 h1:MomrXI50FljY9gWLHckIk1wP6Y9NbMzZqKVufTDfwm0= -github.com/Microsoft/hcsshim v0.7.4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.5 h1:kg/pore5Yyf4DXQ5nelSqfaYQG54YIdNeFRKJaPnFiM= +github.com/Microsoft/hcsshim v0.8.5/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b h1:T4nWG1TXIxeor8mAu5bFguPJgSIGhZqv/f0z55KCrJM= +github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/containerd/cgroups v0.0.0-20180917172123-5017d4e9a9cf h1:MbHLFecYZF1vMNGEmtvMsjTPs9de9pZ7MTGFsZ9ZvnM= -github.com/containerd/cgroups v0.0.0-20180917172123-5017d4e9a9cf/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/cgroups v0.0.0-20181219155423-39b18af02c41 h1:5yg0k8gqOssNLsjjCtXIADoPbAtUtQZJfC8hQ4r2oFY= +github.com/containerd/cgroups v0.0.0-20181219155423-39b18af02c41/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod 
h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/containerd v0.0.0-20180925133938-3bc4ba271e1c h1:cz5oiz+5G82n3ZpvsMVpLC4fvhwrG8l6LExZ+oVrr58= -github.com/containerd/containerd v0.0.0-20180925133938-3bc4ba271e1c/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.2.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-0.20190212172151-f5b0fa220df8 h1:SAFmxzP/R0bE7iYWaYWlnxAO0a4Rol5KwXuS/iD4Mu4= +github.com/containerd/containerd v1.3.0-0.20190212172151-f5b0fa220df8/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/continuity v0.0.0-20180921161001-7f53d412b9eb h1:qSMRxG547z/BgQmyVyADxaMADQXVAD9uleP2sQeClbo= github.com/containerd/continuity v0.0.0-20180921161001-7f53d412b9eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/cri v1.11.1 h1:mmU4M2zh6yLy2jMUAhosOOuaZRoXa1CwYIxM2zNj8iI= -github.com/containerd/cri v1.11.1/go.mod h1:DavH5Qa8+6jOmeOMO3dhWoqksucZDe06LfuhBz/xPZs= +github.com/containerd/continuity v0.0.0-20181001140422-bd77b46c8352 h1:CdBoaTKPl60tksFVWYc5QnLWwXBcU+XcLiXx8+NcZ9o= +github.com/containerd/continuity v0.0.0-20181001140422-bd77b46c8352/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260 h1:XGyg7oTtD0DoRFhbpV6x1WfV0flKC4UxXU7ab1zC08U= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3 h1:esQOJREg8nw8aXj6uCN5dfW5cKUBiEJ/+nni1Q/D/sw= @@ -29,24 +34,37 @@ github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd h1:JNn81o/xG+8NEo3bC/vx9pbi/g2WI8mtP2/nXzu297Y= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/coreos/clair v0.0.0-20180919182544-44ae4bc9590a/go.mod h1:uXhHPWAoRqw0jJc2f8RrPCwRhIo9otQ8OEWUFtpCiwA= -github.com/coreos/go-systemd v0.0.0-20180828140353-eee3db372b31 h1:wRzCUSYhBIk1KvRIlx+nvScCRIxX0iIhSU5h9xj7MUU= -github.com/coreos/go-systemd v0.0.0-20180828140353-eee3db372b31/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142 h1:3jFq2xL4ZajGK4aZY8jz+DAF0FHjI51BXjjSwCzS1Dk= +github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/cli v0.0.0-20180920165730-54c19e67f69c h1:QlAVcyoF7QQVN7zV+xYBjgwtRVlRU3WCTCpb2mcqQrM= github.com/docker/cli v0.0.0-20180920165730-54c19e67f69c/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v0.0.0-20190131223713-234462756460 h1:pil/Gt3dlnN7WIX+6uS3Ok3firOdyzmxqUtYrIik27I= 
+github.com/docker/cli v0.0.0-20190131223713-234462756460/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f h1:hYf+mPizfvpH6VgIxdntnOmQHd1F1mQUc1oG+j3Ol2g= github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible h1:dvc1KSkIYTVjZgHf/CTC2diTYC8PzhaA5sFISRfNVrE= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.0.0-20180924202107-a9c061deec0f h1:BrSaWINJwChqVJoPy/AR82wCtE+Sa1MAB2UiO3n2zcY= github.com/docker/docker v0.0.0-20180924202107-a9c061deec0f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v0.7.3-0.20180531152204-71cd53e4a197 h1:xjQaPxUee5t0z4hTzMWDAJLQ5InCoNS5eMfRdyvG9/o= +github.com/docker/docker v0.7.3-0.20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v0.7.3-0.20190315021241-05e7d000f2fc h1:foSyojC2QjkJrl3HOXwri6KYaKgDsHy9pY2wAFmaKW4= +github.com/docker/docker v0.7.3-0.20190315021241-05e7d000f2fc/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-ce v0.0.0-20180924210327-f53bd8bb8e43 h1:gZ4lWixV821UVbYtr+oz1ZPCHkbtE+ivfmHyZRgyl2Y= github.com/docker/docker-ce v0.0.0-20180924210327-f53bd8bb8e43/go.mod h1:l1FUGRYBvbjnZ8MS6A2xOji4aZFlY/Qmgz7p4oXH7ac= -github.com/docker/docker-ce v17.12.1-ce-rc2+incompatible h1:unxbSYDcG7W0w6ekgvXu+YX9+9Bub/HF2sGnJzonJqk= -github.com/docker/docker-ce v17.12.1-ce-rc2+incompatible/go.mod h1:l1FUGRYBvbjnZ8MS6A2xOji4aZFlY/Qmgz7p4oXH7ac= +github.com/docker/docker-ce v17.12.1-ce-rc2+incompatible h1:UEMGhJyWpUOlw9U1t9pQxerSF6KpQfSnD1WVbTx+9tc= +github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.1 h1:Dq4iIfcM7cNtddhLVWe9h4QDjsi4OER3Z8voPu/I52g= github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.0.0-20180821093606-97c2040d34df/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad h1:VXIse57M5C6ezDuCPyq6QmMvEJ2xclYKZ35SfkXdm3E= @@ -55,8 +73,8 @@ github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 h1:yWHOI+vFjEsAa github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.0.0-20171208192558-822e5b59d346 h1:4K1HUjUxRLG8hxAKjJjV5eZf7JjfAhIXbCKi9VBBypE= -github.com/docker/libnetwork v0.0.0-20171208192558-822e5b59d346/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= +github.com/docker/libnetwork v0.0.0-20180913200009-36d3bed0e9f4 h1:PmuO8T1R9jxvOI+Y6+8YBF+um2L6xp6/F52oDeYTZkk= +github.com/docker/libnetwork v0.0.0-20180913200009-36d3bed0e9f4/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= 
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e/go.mod h1:2H9hjfbpSMHwY503FclkV/lZTBh2YlOmLLSda12uL8c= @@ -68,10 +86,15 @@ github.com/genuinetools/reg v0.16.0 h1:ZhLZPT+aUGHLfy45Ub5FLWik+3Dij1iwaj8A/GyAZ github.com/genuinetools/reg v0.16.0/go.mod h1:12Fe9EIvK3dG/qWhNk5e9O96I8SGmCKLsJ8GsXUbk+Y= github.com/godbus/dbus v4.1.0+incompatible h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4= github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/gofrs/flock v0.7.0 h1:pGFUjl501gafK9HBt1VGL1KCOd/YhIooID+xgyJCf3g= +github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gogo/googleapis v0.0.0-20180501115203-b23578765ee5/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0 h1:kFkMAZBNAn4j7K0GiZr8cRYzejq68VbheufiV3YuyFI= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= @@ -86,48 +109,73 @@ github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/hashicorp/golang-lru v0.0.0-20160207214719-a0d98a5f2880/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c h1:nQcv325vxv2fFHJsOt53eSRf1eINt6vOdYUFfXs4rgk= github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c/go.mod h1:fHzc09UnyJyqyW+bFuq864eh+wC7dj65aXmXLRe5to0= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod 
h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ishidawataru/sctp v0.0.0-20180213033435-07191f837fed h1:3MJOWnAfq3T9eoCQTarEY2DMlUWYcBkBLf03dAMvEb8= +github.com/ishidawataru/sctp v0.0.0-20180213033435-07191f837fed/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y= github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= -github.com/moby/buildkit v0.3.2 h1:hJkMqPlrNN9ILdG1+0W2CCqLeTqDk59+SG/y2Mur+ZM= -github.com/moby/buildkit v0.3.2/go.mod h1:nnELdKPRkUAQR6pAB3mRU3+IlbqL3SSaAWqQL8k/K+4= +github.com/moby/buildkit v0.4.0 h1:G+Tmjcf641ydgaxznDR3NBukvUWHF/ywyepWjiEDsPg= +github.com/moby/buildkit v0.4.0/go.mod h1:MF4hMPvE6YC67Nwj70Soh0QX3igIeDct3HOOcpr0qgw= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM= +github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2 h1:QhPf3A2AZW3tTGvHPg0TA+CR3oHbVLlXUhlghqISp1I= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod 
h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc2.0.20181113215238-10d38b660a77 h1:htmNzC+kfhmhseOSdpsHv4ILLgcWxFXiY/0oCqk+gQA= github.com/opencontainers/runc v1.0.0-rc2.0.20181113215238-10d38b660a77/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runtime-spec v0.0.0-20180909173843-eba862dc2470/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.0.0 h1:AYFJmdZd1xjz5UIb8YpDHthdwAzlM5FVY6PzoNMgAMk= +github.com/opencontainers/selinux v1.0.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= +github.com/opentracing-contrib/go-stdlib v0.0.0-20171029140428-b1a47cfbdd75/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing-contrib/go-stdlib v0.0.0-20180702182724-07a764486eb1 h1:gmB1XmLjI0RXG8rJCP0PK6g8rwhX8COSGFTiOgJ4Wx4= github.com/opentracing-contrib/go-stdlib v0.0.0-20180702182724-07a764486eb1/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/opentracing/opentracing-go v0.0.0-20171003133519-1361b9cd60be/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/peterhellberg/link v1.0.0 h1:mUWkiegowUXEcmlb+ybF75Q/8D2Y0BjZtR8cxoKhaQo= github.com/peterhellberg/link v1.0.0/go.mod h1:gtSlOT4jmkY8P47hbTc8PTgiDDWpdPbFYl75keYyBB8= github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.0.0-20180924113449-f69c853d21c1 h1:mEzWvBiJdUbhqHRT6kNSGzD6IDcWCWF2uAhrEEE740M= @@ -138,19 +186,30 @@ github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrO github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/procfs v0.0.0-20180920065004-418d78d0b9a7 h1:NgR6WN8nQ4SmFC1sSUHY8SriLuWCZ6cCIQtH4vDZN3c= github.com/prometheus/procfs v0.0.0-20180920065004-418d78d0b9a7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/seccomp/libseccomp-golang v0.9.0 h1:S1pmhdFh5spQtVojA+4GUdWBqvI8ydYHxrx8iR6xN8o= +github.com/seccomp/libseccomp-golang v0.9.0/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/sirupsen/logrus v1.0.3/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6 h1:hcP1GmhGigz/O7h1WVUM5KklBp1JoNS9FggWKdj/j3s= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/stretchr/objx 
v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tonistiigi/fsutil v0.0.0-20181011223333-2862f6bc5ac9 h1:w/+9MapoT4VpbsZzMILdG896EygCj14r82Tv1J7JFpw= -github.com/tonistiigi/fsutil v0.0.0-20181011223333-2862f6bc5ac9/go.mod h1:eden9dLzAAuNQ0L7whFr6/Mzgz6btsvQpUnxOOI+CCE= +github.com/tonistiigi/fsutil v0.0.0-20190130224639-b4281fa67095 h1:b9hxvP2uTwPNrzo4q77+g25UgefFB3+f7i5IpRvlzDs= +github.com/tonistiigi/fsutil v0.0.0-20190130224639-b4281fa67095/go.mod h1:UYukpgbLYvlEDwyQ/SqtLJ9QlgYORCPEwDln/Noj7xc= github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe h1:pd7hrFSqUPxYS9IB+UMG1AB/8EXGXo17ssx0bSQ5L6Y= github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe/go.mod h1:/+MCh11CJf2oz0BXmlmqyopK/ad1rKkcOXPoYuPCJYU= github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0= github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk= +github.com/uber/jaeger-client-go v0.0.0-20180103221425-e02c85f9069e/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v1.2.1/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 h1:MCfT24H3f//U5+UCrZp1/riVO3B50BovxtDiNn0XKkk= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM= github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4= @@ -159,6 +218,8 @@ go.etcd.io/bbolt v1.3.1-etcd.8 h1:6J7QAKqfFBGnU80KRnuQxfjjeE5xAGE/qB810I3FQHQ= go.etcd.io/bbolt v1.3.1-etcd.8/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b h1:2b9XGzhjiYsYPnKXoEfL7klWZQIt8IfyRCz62gCqqlQ= golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190129210102-ccddf3741a0c h1:MWY7h75sb9ioBR+s5Zgq1JYXxhbZvrSP2okwLi3ItmI= +golang.org/x/crypto v0.0.0-20190129210102-ccddf3741a0c/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -173,19 +234,24 @@ golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e h1:LSlw/Dbj0MkNvPYAAkGinYmGl golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20170523043604-d80a6e20e776/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180924164928-221a8d4f7494 h1:WIJ3k0fGJRrCVzZTuGmcBnUzWeSDpWiP+jUOxWkA8bo= google.golang.org/genproto v0.0.0-20180924164928-221a8d4f7494/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.15.0 h1:Az/KuahOM4NAidTEuJCv/RonAA7rYsTPkqXVjr+8OOw= google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= @@ -196,4 +262,6 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.1.0+incompatible h1:5USw7CrJBYKqjg9R7QlA6jzqZKEAtvW82aNmsxxGPxw= gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/login.go b/login.go index 5953480da..1fe36ba9a 100644 --- a/login.go +++ b/login.go @@ -13,7 +13,8 @@ import ( "github.com/docker/cli/cli/config" "github.com/docker/cli/cli/config/configfile" - "github.com/docker/docker/api/types" + "github.com/docker/cli/cli/config/types" + dockerapitypes "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/term" "github.com/docker/docker/registry" registryapi "github.com/genuinetools/reg/registry" @@ -88,7 +89,7 @@ func (cmd *loginCommand) Run(ctx context.Context, args []string) error { } // Attempt to login to the registry. 
- r, err := registryapi.New(authConfig, registryapi.Opt{Debug: debug}) + r, err := registryapi.New(cliconfigtypes2dockerapitypes(authConfig), registryapi.Opt{Debug: debug}) if err != nil { return fmt.Errorf("creating registry client failed: %v", err) } @@ -196,3 +197,15 @@ func promptWithDefault(out io.Writer, prompt string, configDefault string) { fmt.Fprintf(out, "%s (%s): ", prompt, configDefault) } + +func cliconfigtypes2dockerapitypes(in types.AuthConfig) dockerapitypes.AuthConfig { + return dockerapitypes.AuthConfig{ + Username: in.Username, + Password: in.Password, + Auth: in.Auth, + Email: in.Email, + ServerAddress: in.ServerAddress, + IdentityToken: in.IdentityToken, + RegistryToken: in.RegistryToken, + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/.gometalinter.json b/vendor/github.com/Microsoft/hcsshim/.gometalinter.json new file mode 100644 index 000000000..00e9a6e2e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.gometalinter.json @@ -0,0 +1,17 @@ +{ + "Vendor": true, + "Deadline": "2m", + "Sort": [ + "linter", + "severity", + "path", + "line" + ], + "Skip": [ + "internal\\schema2" + ], + "EnableGC": true, + "Enable": [ + "gofmt" + ] +} \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/appveyor.yml b/vendor/github.com/Microsoft/hcsshim/appveyor.yml index 8d12f59d1..a8ec5a593 100644 --- a/vendor/github.com/Microsoft/hcsshim/appveyor.yml +++ b/vendor/github.com/Microsoft/hcsshim/appveyor.yml @@ -6,14 +6,24 @@ clone_folder: c:\gopath\src\github.com\Microsoft\hcsshim environment: GOPATH: c:\gopath - PATH: C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin;%PATH% + PATH: C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin;%GOPATH%\bin;C:\gometalinter-2.0.12-windows-amd64;%PATH% + +stack: go 1.11 build_script: - - go get -v -t ./... + - appveyor DownloadFile https://github.com/alecthomas/gometalinter/releases/download/v2.0.12/gometalinter-2.0.12-windows-amd64.zip + - 7z x gometalinter-2.0.12-windows-amd64.zip -y -oC:\ > NUL + - gometalinter.exe --config .gometalinter.json ./... - go build ./cmd/wclayer - go build ./cmd/runhcs + - go build ./cmd/tar2ext4 - go test -v ./... -tags admin + - go test -c ./test/functional/ -tags functional + - go test -c ./test/runhcs/ -tags integration artifacts: - path: 'wclayer.exe' - path: 'runhcs.exe' + - path: 'tar2ext4.exe' + - path: 'functional.test.exe' + - path: 'runhcs.test.exe' \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go index 5f0dcfe75..eb013d2c4 100644 --- a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go +++ b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go @@ -7,6 +7,9 @@ import ( // HNSEndpoint represents a network endpoint in HNS type HNSEndpoint = hns.HNSEndpoint +// Namespace represents a Compartment. 
+type Namespace = hns.Namespace + //SystemType represents the type of the system on which actions are done type SystemType string diff --git a/vendor/github.com/Microsoft/hcsshim/interface.go b/vendor/github.com/Microsoft/hcsshim/interface.go index 2724624fd..5b91e0cc5 100644 --- a/vendor/github.com/Microsoft/hcsshim/interface.go +++ b/vendor/github.com/Microsoft/hcsshim/interface.go @@ -17,6 +17,11 @@ type MappedPipe = schema1.MappedPipe type HvRuntime = schema1.HvRuntime type MappedVirtualDisk = schema1.MappedVirtualDisk +// AssignedDevice represents a device that has been directly assigned to a container +// +// NOTE: Support added in RS5 +type AssignedDevice = schema1.AssignedDevice + // ContainerConfig is used as both the input of CreateContainer // and to convert the parameters to JSON for passing onto the HCS type ContainerConfig = schema1.ContainerConfig diff --git a/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go b/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go new file mode 100644 index 000000000..5d3d0dfef --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go @@ -0,0 +1,100 @@ +package guestrequest + +import ( + "github.com/Microsoft/hcsshim/internal/schema2" +) + +// Arguably, many of these (at least CombinedLayers) should have been generated +// by swagger. +// +// This will also change package name due to an inbound breaking change. + +// This class is used by a modify request to add or remove a combined layers +// structure in the guest. For windows, the GCS applies a filter in ContainerRootPath +// using the specified layers as the parent content. Ignores property ScratchPath +// since the container path is already the scratch path. For linux, the GCS unions +// the specified layers and ScratchPath together, placing the resulting union +// filesystem at ContainerRootPath. +type CombinedLayers struct { + ContainerRootPath string `json:"ContainerRootPath,omitempty"` + Layers []hcsschema.Layer `json:"Layers,omitempty"` + ScratchPath string `json:"ScratchPath,omitempty"` +} + +// Defines the schema for hosted settings passed to GCS and/or OpenGCS + +// SCSI. 
Scratch space for remote file-system commands, or R/W layer for containers +type LCOWMappedVirtualDisk struct { + MountPath string `json:"MountPath,omitempty"` // /tmp/scratch for an LCOW utility VM being used as a service VM + Lun uint8 `json:"Lun,omitempty"` + Controller uint8 `json:"Controller,omitempty"` + ReadOnly bool `json:"ReadOnly,omitempty"` +} + +type WCOWMappedVirtualDisk struct { + ContainerPath string `json:"ContainerPath,omitempty"` + Lun int32 `json:"Lun,omitempty"` +} + +type LCOWMappedDirectory struct { + MountPath string `json:"MountPath,omitempty"` + Port int32 `json:"Port,omitempty"` + ShareName string `json:"ShareName,omitempty"` // If empty not using ANames (not currently supported) + ReadOnly bool `json:"ReadOnly,omitempty"` +} + +// Read-only layers over VPMem +type LCOWMappedVPMemDevice struct { + DeviceNumber uint32 `json:"DeviceNumber,omitempty"` + MountPath string `json:"MountPath,omitempty"` // /tmp/pN +} + +type LCOWNetworkAdapter struct { + NamespaceID string `json:",omitempty"` + ID string `json:",omitempty"` + MacAddress string `json:",omitempty"` + IPAddress string `json:",omitempty"` + PrefixLength uint8 `json:",omitempty"` + GatewayAddress string `json:",omitempty"` + DNSSuffix string `json:",omitempty"` + DNSServerList string `json:",omitempty"` + EnableLowMetric bool `json:",omitempty"` + EncapOverhead uint16 `json:",omitempty"` +} + +type ResourceType string + +const ( + // These are constants for v2 schema modify guest requests. + ResourceTypeMappedDirectory ResourceType = "MappedDirectory" + ResourceTypeMappedVirtualDisk ResourceType = "MappedVirtualDisk" + ResourceTypeNetwork ResourceType = "Network" + ResourceTypeNetworkNamespace ResourceType = "NetworkNamespace" + ResourceTypeCombinedLayers ResourceType = "CombinedLayers" + ResourceTypeVPMemDevice ResourceType = "VPMemDevice" +) + +// GuestRequest is for modify commands passed to the guest. +type GuestRequest struct { + RequestType string `json:"RequestType,omitempty"` + ResourceType ResourceType `json:"ResourceType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} + +type NetworkModifyRequest struct { + AdapterId string `json:"AdapterId,omitempty"` + RequestType string `json:"RequestType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} + +type RS4NetworkModifyRequest struct { + AdapterInstanceId string `json:"AdapterInstanceId,omitempty"` + RequestType string `json:"RequestType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} + +// SignalProcessOptions is the options passed to either WCOW or LCOW +// to signal a given process. 
+type SignalProcessOptions struct { + Signal int `json:,omitempty` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go index e41c40ec8..f9a922a4b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go @@ -5,6 +5,7 @@ import ( "syscall" "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" ) var ( @@ -15,11 +16,20 @@ var ( notificationWatcherCallback = syscall.NewCallback(notificationWatcher) // Notifications for HCS_SYSTEM handles - hcsNotificationSystemExited hcsNotification = 0x00000001 - hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002 - hcsNotificationSystemStartCompleted hcsNotification = 0x00000003 - hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004 - hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005 + hcsNotificationSystemExited hcsNotification = 0x00000001 + hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002 + hcsNotificationSystemStartCompleted hcsNotification = 0x00000003 + hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004 + hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005 + hcsNotificationSystemCrashReport hcsNotification = 0x00000006 + hcsNotificationSystemSiloJobCreated hcsNotification = 0x00000007 + hcsNotificationSystemSaveCompleted hcsNotification = 0x00000008 + hcsNotificationSystemRdpEnhancedModeStateChanged hcsNotification = 0x00000009 + hcsNotificationSystemShutdownFailed hcsNotification = 0x0000000A + hcsNotificationSystemGetPropertiesCompleted hcsNotification = 0x0000000B + hcsNotificationSystemModifyCompleted hcsNotification = 0x0000000C + hcsNotificationSystemCrashInitiated hcsNotification = 0x0000000D + hcsNotificationSystemGuestConnectionClosed hcsNotification = 0x0000000E // Notifications for HCS_PROCESS handles hcsNotificationProcessExited hcsNotification = 0x00010000 @@ -49,16 +59,23 @@ func newChannels() notificationChannels { channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1) channels[hcsNotificationProcessExited] = make(notificationChannel, 1) channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1) + channels[hcsNotificationSystemCrashReport] = make(notificationChannel, 1) + channels[hcsNotificationSystemSiloJobCreated] = make(notificationChannel, 1) + channels[hcsNotificationSystemSaveCompleted] = make(notificationChannel, 1) + channels[hcsNotificationSystemRdpEnhancedModeStateChanged] = make(notificationChannel, 1) + channels[hcsNotificationSystemShutdownFailed] = make(notificationChannel, 1) + channels[hcsNotificationSystemGetPropertiesCompleted] = make(notificationChannel, 1) + channels[hcsNotificationSystemModifyCompleted] = make(notificationChannel, 1) + channels[hcsNotificationSystemCrashInitiated] = make(notificationChannel, 1) + channels[hcsNotificationSystemGuestConnectionClosed] = make(notificationChannel, 1) + return channels } + func closeChannels(channels notificationChannels) { - close(channels[hcsNotificationSystemExited]) - close(channels[hcsNotificationSystemCreateCompleted]) - close(channels[hcsNotificationSystemStartCompleted]) - close(channels[hcsNotificationSystemPauseCompleted]) - close(channels[hcsNotificationSystemResumeCompleted]) - close(channels[hcsNotificationProcessExited]) - close(channels[hcsNotificationServiceDisconnect]) + for _, c := range channels { + close(c) + } } func 
notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr { @@ -75,7 +92,13 @@ func notificationWatcher(notificationType hcsNotification, callbackNumber uintpt return 0 } - context.channels[notificationType] <- result + if channel, ok := context.channels[notificationType]; ok { + channel <- result + } else { + logrus.WithFields(logrus.Fields{ + "notification-type": notificationType, + }).Warn("Received a callback of an unsupported type") + } return 0 } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go index 7471f5cc1..079b56535 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go @@ -7,6 +7,7 @@ import ( "syscall" "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/logfields" "github.com/sirupsen/logrus" ) @@ -72,6 +73,9 @@ var ( // ErrVmcomputeUnknownMessage is an error encountered guest compute system doesn't support the message ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b) + // ErrVmcomputeUnexpectedExit is an error encountered when the compute system terminates unexpectedly + ErrVmcomputeUnexpectedExit = syscall.Errno(0xC0370106) + // ErrNotSupported is an error encountered when hcs doesn't support the request ErrPlatformNotSupported = errors.New("unsupported platform request") ) @@ -116,10 +120,14 @@ func (ev *ErrorEvent) String() string { func processHcsResult(resultp *uint16) []ErrorEvent { if resultp != nil { resultj := interop.ConvertAndFreeCoTaskMemString(resultp) - logrus.Debugf("Result: %s", resultj) + logrus.WithField(logfields.JSON, resultj). + Debug("HCS Result") result := &hcsResult{} if err := json.Unmarshal([]byte(resultj), result); err != nil { - logrus.Warnf("Could not unmarshal HCS result %s: %s", resultj, err) + logrus.WithFields(logrus.Fields{ + logfields.JSON: resultj, + logrus.ErrorKey: err, + }).Warning("Could not unmarshal HCS result") return nil } return result.ErrorEvents diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go index b8e30eba1..b0d49cbcf 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go @@ -27,6 +27,7 @@ import ( //sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? //sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess? //sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? +//sys hcsSignalProcess(process hcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? //sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo? //sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties? //sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess? 
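
Note (illustrative only, not part of the patch): the guestrequest types vendored above are plain JSON-serializable structs, and the new hcsSignalProcess/hcsModifyComputeSystem plumbing consumes documents built from them. The sketch below shows how such a v2 guest modify document might be assembled; the mount path and request values are hypothetical, and the types are redeclared locally because the real ones live under hcsshim's internal/ tree and cannot be imported from outside that module.

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the vendored guestrequest types (redeclared here because
// the originals sit in an internal package).
type ResourceType string

const ResourceTypeMappedDirectory ResourceType = "MappedDirectory"

type LCOWMappedDirectory struct {
	MountPath string `json:"MountPath,omitempty"`
	Port      int32  `json:"Port,omitempty"`
	ReadOnly  bool   `json:"ReadOnly,omitempty"`
}

type GuestRequest struct {
	RequestType  string       `json:"RequestType,omitempty"`
	ResourceType ResourceType `json:"ResourceType,omitempty"`
	Settings     interface{}  `json:"Settings,omitempty"`
}

func main() {
	// Hypothetical "Add" request mapping a read-only directory into an LCOW guest.
	req := GuestRequest{
		RequestType:  "Add",
		ResourceType: ResourceTypeMappedDirectory,
		Settings: LCOWMappedDirectory{
			MountPath: "/run/mounts/m0", // hypothetical guest mount path
			Port:      1,
			ReadOnly:  true,
		},
	}
	b, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // JSON settings document of the kind passed to the guest via Modify
}
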
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go new file mode 100644 index 000000000..90d164e35 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go @@ -0,0 +1,15 @@ +package hcs + +import "github.com/sirupsen/logrus" + +func logOperationBegin(ctx logrus.Fields, msg string) { + logrus.WithFields(ctx).Debug(msg) +} + +func logOperationEnd(ctx logrus.Fields, msg string, err error) { + if err == nil { + logrus.WithFields(ctx).Debug(msg) + } else { + logrus.WithFields(ctx).WithError(err).Error(msg) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go index 8294d66d7..4c35c732c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -2,13 +2,14 @@ package hcs import ( "encoding/json" - "fmt" "io" "sync" "syscall" "time" + "github.com/Microsoft/hcsshim/internal/guestrequest" "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/logfields" "github.com/sirupsen/logrus" ) @@ -20,6 +21,21 @@ type Process struct { system *System cachedPipes *cachedPipes callbackNumber uintptr + + logctx logrus.Fields +} + +func newProcess(process hcsProcess, processID int, computeSystem *System) *Process { + return &Process{ + handle: process, + processID: processID, + system: computeSystem, + logctx: logrus.Fields{ + logfields.HCSOperation: "", + logfields.ContainerID: computeSystem.ID(), + logfields.ProcessID: processID, + }, + } } type cachedPipes struct { @@ -71,70 +87,122 @@ func (process *Process) SystemID() string { return process.system.ID() } +func (process *Process) logOperationBegin(operation string) { + process.logctx[logfields.HCSOperation] = operation + logOperationBegin( + process.logctx, + "hcsshim::Process - Begin Operation") +} + +func (process *Process) logOperationEnd(err error) { + var result string + if err == nil { + result = "Success" + } else { + result = "Error" + } + + logOperationEnd( + process.logctx, + "hcsshim::Process - End Operation - "+result, + err) + process.logctx[logfields.HCSOperation] = "" +} + +// Signal signals the process with `options`. +func (process *Process) Signal(options guestrequest.SignalProcessOptions) (err error) { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcsshim::Process::Signal" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(err) }() + + if process.handle == 0 { + return makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + optionsb, err := json.Marshal(options) + if err != nil { + return err + } + + optionsStr := string(optionsb) + + var resultp *uint16 + syscallWatcher(process.logctx, func() { + err = hcsSignalProcess(process.handle, optionsStr, &resultp) + }) + events := processHcsResult(resultp) + if err != nil { + return makeProcessError(process, operation, err, events) + } + + return nil +} + // Kill signals the process to terminate but does not wait for it to finish terminating. 
-func (process *Process) Kill() error { +func (process *Process) Kill() (err error) { process.handleLock.RLock() defer process.handleLock.RUnlock() - operation := "Kill" - title := "hcsshim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) + + operation := "hcsshim::Process::Kill" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(err) }() if process.handle == 0 { return makeProcessError(process, operation, ErrAlreadyClosed, nil) } var resultp *uint16 - completed := false - go syscallWatcher(fmt.Sprintf("TerminateProcess %s: %d", process.SystemID(), process.Pid()), &completed) - err := hcsTerminateProcess(process.handle, &resultp) - completed = true + syscallWatcher(process.logctx, func() { + err = hcsTerminateProcess(process.handle, &resultp) + }) events := processHcsResult(resultp) if err != nil { return makeProcessError(process, operation, err, events) } - logrus.Debugf(title+" succeeded processid=%d", process.processID) return nil } // Wait waits for the process to exit. -func (process *Process) Wait() error { - operation := "Wait" - title := "hcsshim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) +func (process *Process) Wait() (err error) { + operation := "hcsshim::Process::Wait" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(err) }() - err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil) + err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil) if err != nil { return makeProcessError(process, operation, err, nil) } - logrus.Debugf(title+" succeeded processid=%d", process.processID) return nil } // WaitTimeout waits for the process to exit or the duration to elapse. It returns // false if timeout occurs. -func (process *Process) WaitTimeout(timeout time.Duration) error { - operation := "WaitTimeout" - title := "hcsshim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) +func (process *Process) WaitTimeout(timeout time.Duration) (err error) { + operation := "hcssshim::Process::WaitTimeout" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(err) }() - err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout) + err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout) if err != nil { return makeProcessError(process, operation, err, nil) } - logrus.Debugf(title+" succeeded processid=%d", process.processID) return nil } // ResizeConsole resizes the console of the process. 
-func (process *Process) ResizeConsole(width, height uint16) error { +func (process *Process) ResizeConsole(width, height uint16) (err error) { process.handleLock.RLock() defer process.handleLock.RUnlock() - operation := "ResizeConsole" - title := "hcsshim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) + + operation := "hcsshim::Process::ResizeConsole" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(err) }() if process.handle == 0 { return makeProcessError(process, operation, ErrAlreadyClosed, nil) @@ -162,16 +230,16 @@ func (process *Process) ResizeConsole(width, height uint16) error { return makeProcessError(process, operation, err, events) } - logrus.Debugf(title+" succeeded processid=%d", process.processID) return nil } -func (process *Process) Properties() (*ProcessStatus, error) { +func (process *Process) Properties() (_ *ProcessStatus, err error) { process.handleLock.RLock() defer process.handleLock.RUnlock() - operation := "Properties" - title := "hcsshim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) + + operation := "hcsshim::Process::Properties" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(err) }() if process.handle == 0 { return nil, makeProcessError(process, operation, ErrAlreadyClosed, nil) @@ -181,10 +249,9 @@ func (process *Process) Properties() (*ProcessStatus, error) { resultp *uint16 propertiesp *uint16 ) - completed := false - go syscallWatcher(fmt.Sprintf("GetProcessProperties %s: %d", process.SystemID(), process.Pid()), &completed) - err := hcsGetProcessProperties(process.handle, &propertiesp, &resultp) - completed = true + syscallWatcher(process.logctx, func() { + err = hcsGetProcessProperties(process.handle, &propertiesp, &resultp) + }) events := processHcsResult(resultp) if err != nil { return nil, makeProcessError(process, operation, err, events) @@ -200,14 +267,16 @@ func (process *Process) Properties() (*ProcessStatus, error) { return nil, makeProcessError(process, operation, err, nil) } - logrus.Debugf(title+" succeeded processid=%d, properties=%s", process.processID, propertiesRaw) return properties, nil } // ExitCode returns the exit code of the process. The process must have // already terminated. -func (process *Process) ExitCode() (int, error) { - operation := "ExitCode" +func (process *Process) ExitCode() (_ int, err error) { + operation := "hcsshim::Process::ExitCode" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(err) }() + properties, err := process.Properties() if err != nil { return 0, makeProcessError(process, operation, err, nil) @@ -227,12 +296,13 @@ func (process *Process) ExitCode() (int, error) { // Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing // these pipes does not close the underlying pipes; it should be possible to // call this multiple times to get multiple interfaces. 
-func (process *Process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) { +func (process *Process) Stdio() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) { process.handleLock.RLock() defer process.handleLock.RUnlock() - operation := "Stdio" - title := "hcsshim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) + + operation := "hcsshim::Process::Stdio" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(err) }() if process.handle == 0 { return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil) @@ -245,7 +315,7 @@ func (process *Process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, e processInfo hcsProcessInformation resultp *uint16 ) - err := hcsGetProcessInfo(process.handle, &processInfo, &resultp) + err = hcsGetProcessInfo(process.handle, &processInfo, &resultp) events := processHcsResult(resultp) if err != nil { return nil, nil, nil, makeProcessError(process, operation, err, events) @@ -265,18 +335,18 @@ func (process *Process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, e return nil, nil, nil, makeProcessError(process, operation, err, nil) } - logrus.Debugf(title+" succeeded processid=%d", process.processID) return pipes[0], pipes[1], pipes[2], nil } // CloseStdin closes the write side of the stdin pipe so that the process is // notified on the read side that there is no more data in stdin. -func (process *Process) CloseStdin() error { +func (process *Process) CloseStdin() (err error) { process.handleLock.RLock() defer process.handleLock.RUnlock() - operation := "CloseStdin" - title := "hcsshim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) + + operation := "hcsshim::Process::CloseStdin" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(err) }() if process.handle == 0 { return makeProcessError(process, operation, ErrAlreadyClosed, nil) @@ -303,35 +373,34 @@ func (process *Process) CloseStdin() error { return makeProcessError(process, operation, err, events) } - logrus.Debugf(title+" succeeded processid=%d", process.processID) return nil } // Close cleans up any state associated with the process but does not kill // or wait on it. 
-func (process *Process) Close() error { +func (process *Process) Close() (err error) { process.handleLock.Lock() defer process.handleLock.Unlock() - operation := "Close" - title := "hcsshim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) + + operation := "hcsshim::Process::Close" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(err) }() // Don't double free this if process.handle == 0 { return nil } - if err := process.unregisterCallback(); err != nil { + if err = process.unregisterCallback(); err != nil { return makeProcessError(process, operation, err, nil) } - if err := hcsCloseProcess(process.handle); err != nil { + if err = hcsCloseProcess(process.handle); err != nil { return makeProcessError(process, operation, err, nil) } process.handle = 0 - logrus.Debugf(title+" succeeded processid=%d", process.processID) return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go index 57afd5ec6..50f768b72 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -2,7 +2,6 @@ package hcs import ( "encoding/json" - "fmt" "os" "strconv" "sync" @@ -10,6 +9,7 @@ import ( "time" "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/schema1" "github.com/Microsoft/hcsshim/internal/timeout" "github.com/sirupsen/logrus" @@ -41,36 +41,72 @@ type System struct { handle hcsSystem id string callbackNumber uintptr -} -// CreateComputeSystem creates a new compute system with the given configuration but does not start it. -func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (*System, error) { - operation := "CreateComputeSystem" - title := "hcsshim::" + operation + logctx logrus.Fields +} - computeSystem := &System{ +func newSystem(id string) *System { + return &System{ id: id, + logctx: logrus.Fields{ + logfields.HCSOperation: "", + logfields.ContainerID: id, + }, + } +} + +func (computeSystem *System) logOperationBegin(operation string) { + computeSystem.logctx[logfields.HCSOperation] = operation + logOperationBegin( + computeSystem.logctx, + "hcsshim::ComputeSystem - Begin Operation") +} + +func (computeSystem *System) logOperationEnd(err error) { + var result string + if err == nil { + result = "Success" + } else { + result = "Error" } + logOperationEnd( + computeSystem.logctx, + "hcsshim::ComputeSystem - End Operation - "+result, + err) + computeSystem.logctx[logfields.HCSOperation] = "" +} + +// CreateComputeSystem creates a new compute system with the given configuration but does not start it. +func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System, err error) { + operation := "hcsshim::CreateComputeSystem" + + computeSystem := newSystem(id) + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(err) }() + hcsDocumentB, err := json.Marshal(hcsDocumentInterface) if err != nil { return nil, err } hcsDocument := string(hcsDocumentB) - logrus.Debugf(title+" ID=%s config=%s", id, hcsDocument) + + logrus.WithFields(computeSystem.logctx). + WithField(logfields.JSON, hcsDocument). 
+ Debug("HCS ComputeSystem Document") var ( - resultp *uint16 - identity syscall.Handle + resultp *uint16 + identity syscall.Handle + createError error ) - completed := false - go syscallWatcher(fmt.Sprintf("CreateCompleteSystem %s: %s", id, hcsDocument), &completed) - createError := hcsCreateComputeSystem(id, hcsDocument, identity, &computeSystem.handle, &resultp) - completed = true + syscallWatcher(computeSystem.logctx, func() { + createError = hcsCreateComputeSystem(id, hcsDocument, identity, &computeSystem.handle, &resultp) + }) if createError == nil || IsPending(createError) { - if err := computeSystem.registerCallback(); err != nil { + if err = computeSystem.registerCallback(); err != nil { // Terminate the compute system if it still exists. We're okay to // ignore a failure here. computeSystem.Terminate() @@ -88,25 +124,28 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (*System, return nil, makeSystemError(computeSystem, operation, hcsDocument, err, events) } - logrus.Debugf(title+" succeeded id=%s handle=%d", id, computeSystem.handle) return computeSystem, nil } // OpenComputeSystem opens an existing compute system by ID. -func OpenComputeSystem(id string) (*System, error) { - operation := "OpenComputeSystem" - title := "hcsshim::" + operation - logrus.Debugf(title+" ID=%s", id) - - computeSystem := &System{ - id: id, - } +func OpenComputeSystem(id string) (_ *System, err error) { + operation := "hcsshim::OpenComputeSystem" + + computeSystem := newSystem(id) + computeSystem.logOperationBegin(operation) + defer func() { + if IsNotExist(err) { + computeSystem.logOperationEnd(nil) + } else { + computeSystem.logOperationEnd(err) + } + }() var ( handle hcsSystem resultp *uint16 ) - err := hcsOpenComputeSystem(id, &handle, &resultp) + err = hcsOpenComputeSystem(id, &handle, &resultp) events := processHcsResult(resultp) if err != nil { return nil, makeSystemError(computeSystem, operation, "", err, events) @@ -114,18 +153,36 @@ func OpenComputeSystem(id string) (*System, error) { computeSystem.handle = handle - if err := computeSystem.registerCallback(); err != nil { + if err = computeSystem.registerCallback(); err != nil { return nil, makeSystemError(computeSystem, operation, "", err, nil) } - logrus.Debugf(title+" succeeded id=%s handle=%d", id, handle) return computeSystem, nil } // GetComputeSystems gets a list of the compute systems on the system that match the query -func GetComputeSystems(q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) { - operation := "GetComputeSystems" - title := "hcsshim::" + operation +func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerProperties, err error) { + operation := "hcsshim::GetComputeSystems" + fields := logrus.Fields{ + logfields.HCSOperation: operation, + } + logOperationBegin( + fields, + "hcsshim::ComputeSystem - Begin Operation") + + defer func() { + var result string + if err == nil { + result = "Success" + } else { + result = "Error" + } + + logOperationEnd( + fields, + "hcsshim::ComputeSystem - End Operation - "+result, + err) + }() queryb, err := json.Marshal(q) if err != nil { @@ -133,16 +190,19 @@ func GetComputeSystems(q schema1.ComputeSystemQuery) ([]schema1.ContainerPropert } query := string(queryb) - logrus.Debugf(title+" query=%s", query) + + logrus.WithFields(fields). + WithField(logfields.JSON, query). 
+ Debug("HCS ComputeSystem Query") var ( resultp *uint16 computeSystemsp *uint16 ) - completed := false - go syscallWatcher(fmt.Sprintf("GetComputeSystems %s:", query), &completed) - err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp) - completed = true + + syscallWatcher(fields, func() { + err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp) + }) events := processHcsResult(resultp) if err != nil { return nil, &HcsError{Op: operation, Err: err, Events: events} @@ -153,20 +213,21 @@ func GetComputeSystems(q schema1.ComputeSystemQuery) ([]schema1.ContainerPropert } computeSystemsRaw := interop.ConvertAndFreeCoTaskMemBytes(computeSystemsp) computeSystems := []schema1.ContainerProperties{} - if err := json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil { + if err = json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil { return nil, err } - logrus.Debugf(title + " succeeded") return computeSystems, nil } // Start synchronously starts the computeSystem. -func (computeSystem *System) Start() error { +func (computeSystem *System) Start() (err error) { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - title := "hcsshim::ComputeSystem::Start ID=" + computeSystem.ID() - logrus.Debugf(title) + + operation := "hcsshim::ComputeSystem::Start" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(err) }() if computeSystem.handle == 0 { return makeSystemError(computeSystem, "Start", "", ErrAlreadyClosed, nil) @@ -199,16 +260,14 @@ func (computeSystem *System) Start() error { } var resultp *uint16 - completed := false - go syscallWatcher(fmt.Sprintf("StartComputeSystem %s:", computeSystem.ID()), &completed) - err := hcsStartComputeSystem(computeSystem.handle, "", &resultp) - completed = true + syscallWatcher(computeSystem.logctx, func() { + err = hcsStartComputeSystem(computeSystem.handle, "", &resultp) + }) events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart) if err != nil { return makeSystemError(computeSystem, "Start", "", err, events) } - logrus.Debugf(title + " succeeded") return nil } @@ -219,98 +278,133 @@ func (computeSystem *System) ID() string { // Shutdown requests a compute system shutdown, if IsPending() on the error returned is true, // it may not actually be shut down until Wait() succeeds. 
-func (computeSystem *System) Shutdown() error { +func (computeSystem *System) Shutdown() (err error) { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - title := "hcsshim::ComputeSystem::Shutdown" - logrus.Debugf(title) + + operation := "hcsshim::ComputeSystem::Shutdown" + computeSystem.logOperationBegin(operation) + defer func() { + if IsAlreadyStopped(err) { + computeSystem.logOperationEnd(nil) + } else { + computeSystem.logOperationEnd(err) + } + }() + if computeSystem.handle == 0 { return makeSystemError(computeSystem, "Shutdown", "", ErrAlreadyClosed, nil) } var resultp *uint16 - completed := false - go syscallWatcher(fmt.Sprintf("ShutdownComputeSystem %s:", computeSystem.ID()), &completed) - err := hcsShutdownComputeSystem(computeSystem.handle, "", &resultp) - completed = true + syscallWatcher(computeSystem.logctx, func() { + err = hcsShutdownComputeSystem(computeSystem.handle, "", &resultp) + }) events := processHcsResult(resultp) if err != nil { return makeSystemError(computeSystem, "Shutdown", "", err, events) } - logrus.Debugf(title + " succeeded") return nil } // Terminate requests a compute system terminate, if IsPending() on the error returned is true, // it may not actually be shut down until Wait() succeeds. -func (computeSystem *System) Terminate() error { +func (computeSystem *System) Terminate() (err error) { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - title := "hcsshim::ComputeSystem::Terminate ID=" + computeSystem.ID() - logrus.Debugf(title) + + operation := "hcsshim::ComputeSystem::Terminate" + computeSystem.logOperationBegin(operation) + defer func() { + if IsPending(err) { + computeSystem.logOperationEnd(nil) + } else { + computeSystem.logOperationEnd(err) + } + }() if computeSystem.handle == 0 { return makeSystemError(computeSystem, "Terminate", "", ErrAlreadyClosed, nil) } var resultp *uint16 - completed := false - go syscallWatcher(fmt.Sprintf("TerminateComputeSystem %s:", computeSystem.ID()), &completed) - err := hcsTerminateComputeSystem(computeSystem.handle, "", &resultp) - completed = true + syscallWatcher(computeSystem.logctx, func() { + err = hcsTerminateComputeSystem(computeSystem.handle, "", &resultp) + }) events := processHcsResult(resultp) - if err != nil { + if err != nil && err != ErrVmcomputeAlreadyStopped { return makeSystemError(computeSystem, "Terminate", "", err, events) } - logrus.Debugf(title + " succeeded") return nil } // Wait synchronously waits for the compute system to shutdown or terminate. -func (computeSystem *System) Wait() error { - title := "hcsshim::ComputeSystem::Wait ID=" + computeSystem.ID() - logrus.Debugf(title) +func (computeSystem *System) Wait() (err error) { + operation := "hcsshim::ComputeSystem::Wait" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(err) }() - err := waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil) + err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil) if err != nil { return makeSystemError(computeSystem, "Wait", "", err, nil) } - logrus.Debugf(title + " succeeded") + return nil +} + +// WaitExpectedError synchronously waits for the compute system to shutdown or +// terminate, and ignores the passed error if it occurs. 
+func (computeSystem *System) WaitExpectedError(expected error) (err error) { + operation := "hcsshim::ComputeSystem::WaitExpectedError" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(err) }() + + err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil) + if err != nil && err != expected { + return makeSystemError(computeSystem, "WaitExpectedError", "", err, nil) + } + return nil } // WaitTimeout synchronously waits for the compute system to terminate or the duration to elapse. // If the timeout expires, IsTimeout(err) == true -func (computeSystem *System) WaitTimeout(timeout time.Duration) error { - title := "hcsshim::ComputeSystem::WaitTimeout ID=" + computeSystem.ID() - logrus.Debugf(title) +func (computeSystem *System) WaitTimeout(timeout time.Duration) (err error) { + operation := "hcsshim::ComputeSystem::WaitTimeout" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(err) }() - err := waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout) + err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout) if err != nil { return makeSystemError(computeSystem, "WaitTimeout", "", err, nil) } - logrus.Debugf(title + " succeeded") return nil } -func (computeSystem *System) Properties(types ...schema1.PropertyType) (*schema1.ContainerProperties, error) { +func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schema1.ContainerProperties, err error) { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() + operation := "hcsshim::ComputeSystem::Properties" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(err) }() + queryj, err := json.Marshal(schema1.PropertyQuery{types}) if err != nil { return nil, makeSystemError(computeSystem, "Properties", "", err, nil) } + logrus.WithFields(computeSystem.logctx). + WithField(logfields.JSON, queryj). + Debug("HCS ComputeSystem Properties Query") + var resultp, propertiesp *uint16 - completed := false - go syscallWatcher(fmt.Sprintf("GetComputeSystemProperties %s:", computeSystem.ID()), &completed) - err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryj), &propertiesp, &resultp) - completed = true + syscallWatcher(computeSystem.logctx, func() { + err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryj), &propertiesp, &resultp) + }) events := processHcsResult(resultp) if err != nil { return nil, makeSystemError(computeSystem, "Properties", "", err, events) @@ -324,64 +418,69 @@ func (computeSystem *System) Properties(types ...schema1.PropertyType) (*schema1 if err := json.Unmarshal(propertiesRaw, properties); err != nil { return nil, makeSystemError(computeSystem, "Properties", "", err, nil) } + return properties, nil } // Pause pauses the execution of the computeSystem. This feature is not enabled in TP5. 
-func (computeSystem *System) Pause() error { +func (computeSystem *System) Pause() (err error) { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - title := "hcsshim::ComputeSystem::Pause ID=" + computeSystem.ID() - logrus.Debugf(title) + + operation := "hcsshim::ComputeSystem::Pause" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(err) }() if computeSystem.handle == 0 { return makeSystemError(computeSystem, "Pause", "", ErrAlreadyClosed, nil) } var resultp *uint16 - completed := false - go syscallWatcher(fmt.Sprintf("PauseComputeSystem %s:", computeSystem.ID()), &completed) - err := hcsPauseComputeSystem(computeSystem.handle, "", &resultp) - completed = true + syscallWatcher(computeSystem.logctx, func() { + err = hcsPauseComputeSystem(computeSystem.handle, "", &resultp) + }) events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause) if err != nil { return makeSystemError(computeSystem, "Pause", "", err, events) } - logrus.Debugf(title + " succeeded") return nil } // Resume resumes the execution of the computeSystem. This feature is not enabled in TP5. -func (computeSystem *System) Resume() error { +func (computeSystem *System) Resume() (err error) { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - title := "hcsshim::ComputeSystem::Resume ID=" + computeSystem.ID() - logrus.Debugf(title) + + operation := "hcsshim::ComputeSystem::Resume" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(err) }() if computeSystem.handle == 0 { return makeSystemError(computeSystem, "Resume", "", ErrAlreadyClosed, nil) } var resultp *uint16 - completed := false - go syscallWatcher(fmt.Sprintf("ResumeComputeSystem %s:", computeSystem.ID()), &completed) - err := hcsResumeComputeSystem(computeSystem.handle, "", &resultp) - completed = true + syscallWatcher(computeSystem.logctx, func() { + err = hcsResumeComputeSystem(computeSystem.handle, "", &resultp) + }) events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume) if err != nil { return makeSystemError(computeSystem, "Resume", "", err, events) } - logrus.Debugf(title + " succeeded") return nil } // CreateProcess launches a new process within the computeSystem. -func (computeSystem *System) CreateProcess(c interface{}) (*Process, error) { +func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error) { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - title := "hcsshim::ComputeSystem::CreateProcess ID=" + computeSystem.ID() + + operation := "hcsshim::ComputeSystem::CreateProcess" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(err) }() + var ( processInfo hcsProcessInformation processHandle hcsProcess @@ -398,42 +497,50 @@ func (computeSystem *System) CreateProcess(c interface{}) (*Process, error) { } configuration := string(configurationb) - logrus.Debugf(title+" config=%s", configuration) - completed := false - go syscallWatcher(fmt.Sprintf("CreateProcess %s: %s", computeSystem.ID(), configuration), &completed) - err = hcsCreateProcess(computeSystem.handle, configuration, &processInfo, &processHandle, &resultp) - completed = true + logrus.WithFields(computeSystem.logctx). + WithField(logfields.JSON, configuration). 
+ Debug("HCS ComputeSystem Process Document") + + syscallWatcher(computeSystem.logctx, func() { + err = hcsCreateProcess(computeSystem.handle, configuration, &processInfo, &processHandle, &resultp) + }) events := processHcsResult(resultp) if err != nil { return nil, makeSystemError(computeSystem, "CreateProcess", configuration, err, events) } - process := &Process{ - handle: processHandle, - processID: int(processInfo.ProcessId), - system: computeSystem, - cachedPipes: &cachedPipes{ - stdIn: processInfo.StdInput, - stdOut: processInfo.StdOutput, - stdErr: processInfo.StdError, - }, + logrus.WithFields(computeSystem.logctx). + WithField(logfields.ProcessID, processInfo.ProcessId). + Debug("HCS ComputeSystem CreateProcess PID") + + process := newProcess(processHandle, int(processInfo.ProcessId), computeSystem) + process.cachedPipes = &cachedPipes{ + stdIn: processInfo.StdInput, + stdOut: processInfo.StdOutput, + stdErr: processInfo.StdError, } - if err := process.registerCallback(); err != nil { + if err = process.registerCallback(); err != nil { return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil) } - logrus.Debugf(title+" succeeded processid=%d", process.processID) return process, nil } // OpenProcess gets an interface to an existing process within the computeSystem. -func (computeSystem *System) OpenProcess(pid int) (*Process, error) { +func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - title := "hcsshim::ComputeSystem::OpenProcess ID=" + computeSystem.ID() - logrus.Debugf(title+" processid=%d", pid) + + // Add PID for the context of this operation + computeSystem.logctx[logfields.ProcessID] = pid + defer delete(computeSystem.logctx, logfields.ProcessID) + + operation := "hcsshim::ComputeSystem::OpenProcess" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(err) }() + var ( processHandle hcsProcess resultp *uint16 @@ -443,56 +550,49 @@ func (computeSystem *System) OpenProcess(pid int) (*Process, error) { return nil, makeSystemError(computeSystem, "OpenProcess", "", ErrAlreadyClosed, nil) } - completed := false - go syscallWatcher(fmt.Sprintf("OpenProcess %s: %d", computeSystem.ID(), pid), &completed) - err := hcsOpenProcess(computeSystem.handle, uint32(pid), &processHandle, &resultp) - completed = true + syscallWatcher(computeSystem.logctx, func() { + err = hcsOpenProcess(computeSystem.handle, uint32(pid), &processHandle, &resultp) + }) events := processHcsResult(resultp) if err != nil { return nil, makeSystemError(computeSystem, "OpenProcess", "", err, events) } - process := &Process{ - handle: processHandle, - processID: pid, - system: computeSystem, - } - - if err := process.registerCallback(); err != nil { + process := newProcess(processHandle, pid, computeSystem) + if err = process.registerCallback(); err != nil { return nil, makeSystemError(computeSystem, "OpenProcess", "", err, nil) } - logrus.Debugf(title+" succeeded processid=%s", process.processID) return process, nil } // Close cleans up any state associated with the compute system but does not terminate or wait for it. 
-func (computeSystem *System) Close() error { +func (computeSystem *System) Close() (err error) { computeSystem.handleLock.Lock() defer computeSystem.handleLock.Unlock() - title := "hcsshim::ComputeSystem::Close ID=" + computeSystem.ID() - logrus.Debugf(title) + + operation := "hcsshim::ComputeSystem::Close" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(err) }() // Don't double free this if computeSystem.handle == 0 { return nil } - if err := computeSystem.unregisterCallback(); err != nil { + if err = computeSystem.unregisterCallback(); err != nil { return makeSystemError(computeSystem, "Close", "", err, nil) } - completed := false - go syscallWatcher(fmt.Sprintf("CloseComputeSystem %s:", computeSystem.ID()), &completed) - err := hcsCloseComputeSystem(computeSystem.handle) - completed = true + syscallWatcher(computeSystem.logctx, func() { + err = hcsCloseComputeSystem(computeSystem.handle) + }) if err != nil { return makeSystemError(computeSystem, "Close", "", err, nil) } computeSystem.handle = 0 - logrus.Debugf(title + " succeeded") return nil } @@ -553,11 +653,14 @@ func (computeSystem *System) unregisterCallback() error { return nil } -// Modifies the System by sending a request to HCS -func (computeSystem *System) Modify(config interface{}) error { +// Modify the System by sending a request to HCS +func (computeSystem *System) Modify(config interface{}) (err error) { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() - title := "hcsshim::Modify ID=" + computeSystem.id + + operation := "hcsshim::ComputeSystem::Modify" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(err) }() if computeSystem.handle == 0 { return makeSystemError(computeSystem, "Modify", "", ErrAlreadyClosed, nil) @@ -569,17 +672,19 @@ func (computeSystem *System) Modify(config interface{}) error { } requestString := string(requestJSON) - logrus.Debugf(title + " " + requestString) + + logrus.WithFields(computeSystem.logctx). + WithField(logfields.JSON, requestString). + Debug("HCS ComputeSystem Modify Document") var resultp *uint16 - completed := false - go syscallWatcher(fmt.Sprintf("ModifyComputeSystem %s: %s", computeSystem.ID(), requestString), &completed) - err = hcsModifyComputeSystem(computeSystem.handle, requestString, &resultp) - completed = true + syscallWatcher(computeSystem.logctx, func() { + err = hcsModifyComputeSystem(computeSystem.handle, requestString, &resultp) + }) events := processHcsResult(resultp) if err != nil { return makeSystemError(computeSystem, "Modify", requestString, err, events) } - logrus.Debugf(title + " succeeded ") + return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go index 6b94bc9ff..f85ed3187 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go @@ -1,8 +1,9 @@ package hcs import ( - "time" + "context" + "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/timeout" "github.com/sirupsen/logrus" ) @@ -16,15 +17,25 @@ import ( // // Usage is: // -// completed := false -// go syscallWatcher("some description", &completed) -// -// completed = true +// syscallWatcher(logContext, func() { +// err = (args...) 
+// }) // -func syscallWatcher(description string, syscallCompleted *bool) { - time.Sleep(timeout.SyscallWatcher) - if *syscallCompleted { - return + +func syscallWatcher(logContext logrus.Fields, syscallLambda func()) { + ctx, cancel := context.WithTimeout(context.Background(), timeout.SyscallWatcher) + defer cancel() + go watchFunc(ctx, logContext) + syscallLambda() +} + +func watchFunc(ctx context.Context, logContext logrus.Fields) { + select { + case <-ctx.Done(): + if ctx.Err() != context.Canceled { + logrus.WithFields(logContext). + WithField(logfields.Timeout, timeout.SyscallWatcher). + Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.") + } } - logrus.Warnf("%s: Did not complete within %s. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see is there is a syscall stuck in the platform API for a significant length of time.", description, timeout.SyscallWatcher) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go index 48d5cd32b..fcd5cdc87 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go @@ -1,4 +1,4 @@ -// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT +// Code generated mksyscall_windows.exe DO NOT EDIT package hcs @@ -6,7 +6,6 @@ import ( "syscall" "unsafe" - "github.com/Microsoft/hcsshim/internal/interop" "golang.org/x/sys/windows" ) @@ -57,12 +56,13 @@ var ( procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess") - procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") - procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") - procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") - procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") - procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") - procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") + + procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") + procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") + procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") + procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") + procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") + procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") ) func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) { @@ -80,7 +80,10 @@ func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result } r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -105,7 +108,10 @@ func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall } r0, _, _ := 
syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -125,7 +131,10 @@ func _hcsOpenComputeSystem(id *uint16, computeSystem *hcsSystem, result **uint16 } r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -136,7 +145,10 @@ func hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) { } r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -156,7 +168,10 @@ func _hcsStartComputeSystem(computeSystem hcsSystem, options *uint16, result **u } r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -176,7 +191,10 @@ func _hcsShutdownComputeSystem(computeSystem hcsSystem, options *uint16, result } r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -196,7 +214,10 @@ func _hcsTerminateComputeSystem(computeSystem hcsSystem, options *uint16, result } r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -216,7 +237,10 @@ func _hcsPauseComputeSystem(computeSystem hcsSystem, options *uint16, result **u } r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -236,7 +260,10 @@ func _hcsResumeComputeSystem(computeSystem hcsSystem, options *uint16, result ** } r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -256,7 +283,10 @@ func _hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery *uint } r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 
0xffff + } + hr = syscall.Errno(r0) } return } @@ -276,7 +306,10 @@ func _hcsModifyComputeSystem(computeSystem hcsSystem, configuration *uint16, res } r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -287,7 +320,10 @@ func hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, } r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -298,7 +334,10 @@ func hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) { } r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -318,7 +357,10 @@ func _hcsCreateProcess(computeSystem hcsSystem, processParameters *uint16, proce } r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -329,7 +371,10 @@ func hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, re } r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -340,7 +385,10 @@ func hcsCloseProcess(process hcsProcess) (hr error) { } r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -351,7 +399,33 @@ func hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) { } r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSignalProcess(process hcsProcess, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSignalProcess(process, _p0, result) +} + +func _hcsSignalProcess(process hcsProcess, options *uint16, result **uint16) (hr error) { + if hr = procHcsTerminateProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -362,7 +436,10 @@ func hcsGetProcessInfo(process 
hcsProcess, processInformation *hcsProcessInforma } r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -373,7 +450,10 @@ func hcsGetProcessProperties(process hcsProcess, processProperties **uint16, res } r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -393,7 +473,10 @@ func _hcsModifyProcess(process hcsProcess, settings *uint16, result **uint16) (h } r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -413,7 +496,10 @@ func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result } r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -424,7 +510,10 @@ func hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context ui } r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -435,7 +524,10 @@ func hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) { } r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go index c8d362c66..921c2c855 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go @@ -36,10 +36,6 @@ func New(err error, title, rest string) error { return &HcsError{title, rest, err} } -func Errorf(err error, title, format string, a ...interface{}) error { - return New(err, title, fmt.Sprintf(format, a...)) -} - func Win32FromError(err error) uint32 { if herr, ok := err.(*HcsError); ok { return Win32FromError(herr.Err) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go index ce636458c..59ec7004c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go @@ -23,7 +23,9 @@ type HNSEndpoint struct { DisableICC bool `json:",omitempty"` PrefixLength uint8 `json:",omitempty"` IsRemoteEndpoint bool `json:",omitempty"` + EnableLowMetric bool `json:",omitempty"` Namespace 
*Namespace `json:",omitempty"` + EncapOverhead uint16 `json:",omitempty"` } //SystemType represents the type of the system on which actions are done diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go index ff7369e6f..31322a681 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go @@ -20,6 +20,7 @@ type ELBPolicy struct { SourceVIP string `json:"SourceVIP,omitempty"` VIPs []string `json:"VIPs,omitempty"` ILB bool `json:"ILB,omitempty"` + DSR bool `json:"IsDSR,omitempty"` } // LBPolicy is a structure defining schema for LoadBalancing based Policy diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go index 863e3429c..204633a48 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go @@ -1,4 +1,4 @@ -// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT +// Code generated mksyscall_windows.exe DO NOT EDIT package hns @@ -6,7 +6,6 @@ import ( "syscall" "unsafe" - "github.com/Microsoft/hcsshim/internal/interop" "golang.org/x/sys/windows" ) @@ -68,7 +67,10 @@ func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) } r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go index f10c88d08..2f6ec029e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go @@ -5,9 +5,9 @@ import ( "unsafe" ) -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go interop.go +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go interop.go -//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree +//sys coTaskMemFree(buffer unsafe.Pointer) = api_ms_win_core_com_l1_1_0.CoTaskMemFree func ConvertAndFreeCoTaskMemString(buffer *uint16) string { str := syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(buffer))[:]) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go index 2f5bf8f55..12b0c71c5 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go @@ -1,4 +1,4 @@ -// Code generated by 'go generate'; DO NOT EDIT. 
+// Code generated mksyscall_windows.exe DO NOT EDIT package interop @@ -37,9 +37,9 @@ func errnoErr(e syscall.Errno) error { } var ( - modole32 = windows.NewLazySystemDLL("ole32.dll") + modapi_ms_win_core_com_l1_1_0 = windows.NewLazySystemDLL("api-ms-win-core-com-l1-1-0.dll") - procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") + procCoTaskMemFree = modapi_ms_win_core_com_l1_1_0.NewProc("CoTaskMemFree") ) func coTaskMemFree(buffer unsafe.Pointer) { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go new file mode 100644 index 000000000..a1527d706 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go @@ -0,0 +1,37 @@ +package logfields + +const ( + // Identifiers + + ContainerID = "cid" + UVMID = "uvm-id" + ProcessID = "pid" + + // Common Misc + + // Timeout represents an operation timeout. + Timeout = "timeout" + JSON = "json" + + // Keys/values + + Field = "field" + OCIAnnotation = "oci-annotation" + Value = "value" + + // Golang type's + + ExpectedType = "expected-type" + Bool = "bool" + Uint32 = "uint32" + Uint64 = "uint64" + + // HCS + + HCSOperation = "hcs-op" + HCSOperationResult = "hcs-op-result" + + // runhcs + + VMShimOperation = "vmshim-op" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go b/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go index 6fa3bbc73..995433ace 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go @@ -3,6 +3,8 @@ package schema1 import ( "encoding/json" "time" + + "github.com/Microsoft/hcsshim/internal/schema2" ) // ProcessConfig is used as both the input of Container.CreateProcess @@ -115,9 +117,10 @@ type ComputeSystemQuery struct { type PropertyType string const ( - PropertyTypeStatistics PropertyType = "Statistics" - PropertyTypeProcessList = "ProcessList" - PropertyTypeMappedVirtualDisk = "MappedVirtualDisk" + PropertyTypeStatistics PropertyType = "Statistics" // V1 and V2 + PropertyTypeProcessList = "ProcessList" // V1 and V2 + PropertyTypeMappedVirtualDisk = "MappedVirtualDisk" // Not supported in V2 schema call + PropertyTypeGuestConnection = "GuestConnection" // V1 and V2. 
Nil return from HCS before RS5 ) type PropertyQuery struct { @@ -142,6 +145,7 @@ type ContainerProperties struct { Statistics Statistics `json:",omitempty"` ProcessList []ProcessListItem `json:",omitempty"` MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"` + GuestConnectionInfo GuestConnectionInfo `json:",omitempty"` } // MemoryStats holds the memory statistics for a container @@ -206,6 +210,19 @@ type MappedVirtualDiskController struct { MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"` } +// GuestDefinedCapabilities is part of the GuestConnectionInfo returned by a GuestConnection call on a utility VM +type GuestDefinedCapabilities struct { + NamespaceAddRequestSupported bool `json:",omitempty"` + SignalProcessSupported bool `json:",omitempty"` +} + +// GuestConnectionInfo is the structure of an iterm return by a GuestConnection call on a utility VM +type GuestConnectionInfo struct { + SupportedSchemaVersions []hcsschema.Version `json:",omitempty"` + ProtocolVersion uint32 `json:",omitempty"` + GuestDefinedCapabilities GuestDefinedCapabilities `json:",omitempty"` +} + // Type of Request Support in ModifySystem type RequestType string diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/attachment.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/attachment.go new file mode 100644 index 000000000..09456cbc2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/attachment.go @@ -0,0 +1,31 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Attachment struct { + + Type_ string `json:"Type,omitempty"` + + Path string `json:"Path,omitempty"` + + IgnoreFlushes bool `json:"IgnoreFlushes,omitempty"` + + CachingMode string `json:"CachingMode,omitempty"` + + NoWriteHardening bool `json:"NoWriteHardening,omitempty"` + + DisableExpansionOptimization bool `json:"DisableExpansionOptimization,omitempty"` + + IgnoreRelativeLocator bool `json:"IgnoreRelativeLocator,omitempty"` + + CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/battery.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/battery.go new file mode 100644 index 000000000..ecbbed4c2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/battery.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Battery struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/cache_query_stats_response.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/cache_query_stats_response.go new file mode 100644 index 000000000..243779eab --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/cache_query_stats_response.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type 
CacheQueryStatsResponse struct { + + L3OccupancyBytes int32 `json:"L3OccupancyBytes,omitempty"` + + L3TotalBwBytes int32 `json:"L3TotalBwBytes,omitempty"` + + L3LocalBwBytes int32 `json:"L3LocalBwBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/chipset.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/chipset.go new file mode 100644 index 000000000..ca75277a3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/chipset.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Chipset struct { + Uefi *Uefi `json:"Uefi,omitempty"` + + IsNumLockDisabled bool `json:"IsNumLockDisabled,omitempty"` + + BaseBoardSerialNumber string `json:"BaseBoardSerialNumber,omitempty"` + + ChassisSerialNumber string `json:"ChassisSerialNumber,omitempty"` + + ChassisAssetTag string `json:"ChassisAssetTag,omitempty"` + + UseUtc bool `json:"UseUtc,omitempty"` + + // LinuxKernelDirect - Added in v2.2 Builds >=181117 + LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/close_handle.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/close_handle.go new file mode 100644 index 000000000..88f01707a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/close_handle.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CloseHandle struct { + + Handle string `json:"Handle,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/com_port.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/com_port.go new file mode 100644 index 000000000..c665be3d5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/com_port.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// ComPort specifies the named pipe that will be used for the port, with empty string indicating a disconnected port. 
+type ComPort struct { + + NamedPipe string `json:"NamedPipe,omitempty"` + + OptimizeForDebugger bool `json:"OptimizeForDebugger,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/compute_system.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/compute_system.go new file mode 100644 index 000000000..85785d285 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/compute_system.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ComputeSystem struct { + + Owner string `json:"Owner,omitempty"` + + SchemaVersion *Version `json:"SchemaVersion,omitempty"` + + HostingSystemId string `json:"HostingSystemId,omitempty"` + + HostedSystem *HostedSystem `json:"HostedSystem,omitempty"` + + Container *Container `json:"Container,omitempty"` + + VirtualMachine *VirtualMachine `json:"VirtualMachine,omitempty"` + + ShouldTerminateOnLastHandleClosed bool `json:"ShouldTerminateOnLastHandleClosed,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/configuration.go new file mode 100644 index 000000000..1a47db7d9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/configuration.go @@ -0,0 +1,72 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "net/http" +) + +// contextKeys are used to identify the type of value in the context. +// Since these are string, it is possible to get a short description of the +// context key for logging and debugging using key.String(). + +type contextKey string + +func (c contextKey) String() string { + return "auth " + string(c) +} + +var ( + // ContextOAuth2 takes a oauth2.TokenSource as authentication for the request. + ContextOAuth2 = contextKey("token") + + // ContextBasicAuth takes BasicAuth as authentication for the request. + ContextBasicAuth = contextKey("basic") + + // ContextAccessToken takes a string oauth2 access token as authentication for the request. 
+ ContextAccessToken = contextKey("accesstoken") + + // ContextAPIKey takes an APIKey as authentication for the request + ContextAPIKey = contextKey("apikey") +) + +// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth +type BasicAuth struct { + UserName string `json:"userName,omitempty"` + Password string `json:"password,omitempty"` +} + +// APIKey provides API key based authentication to a request passed via context using ContextAPIKey +type APIKey struct { + Key string + Prefix string +} + +type Configuration struct { + BasePath string `json:"basePath,omitempty"` + Host string `json:"host,omitempty"` + Scheme string `json:"scheme,omitempty"` + DefaultHeader map[string]string `json:"defaultHeader,omitempty"` + UserAgent string `json:"userAgent,omitempty"` + HTTPClient *http.Client +} + +func NewConfiguration() *Configuration { + cfg := &Configuration{ + BasePath: "https://localhost", + DefaultHeader: make(map[string]string), + UserAgent: "Swagger-Codegen/2.1.0/go", + } + return cfg +} + +func (c *Configuration) AddDefaultHeader(key string, value string) { + c.DefaultHeader[key] = value +} \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/console_size.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/console_size.go new file mode 100644 index 000000000..adbe07fe5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/console_size.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ConsoleSize struct { + + Height int32 `json:"Height,omitempty"` + + Width int32 `json:"Width,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/container.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/container.go new file mode 100644 index 000000000..17dce28bc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/container.go @@ -0,0 +1,35 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Container struct { + + GuestOs *GuestOs `json:"GuestOs,omitempty"` + + Storage *Storage `json:"Storage,omitempty"` + + MappedDirectories []MappedDirectory `json:"MappedDirectories,omitempty"` + + MappedPipes []MappedPipe `json:"MappedPipes,omitempty"` + + Memory *Memory `json:"Memory,omitempty"` + + Processor *Processor `json:"Processor,omitempty"` + + Networking *Networking `json:"Networking,omitempty"` + + HvSocket *HvSocket `json:"HvSocket,omitempty"` + + ContainerCredentialGuard *ContainerCredentialGuardState `json:"ContainerCredentialGuard,omitempty"` + + RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` + + AssignedDevices []Device `json:"AssignedDevices,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_state.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_state.go new file mode 100644 index 000000000..0f8f64437 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_state.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description 
provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardState struct { + + // Authentication cookie for calls to a Container Credential Guard instance. + Cookie string `json:"Cookie,omitempty"` + + // Name of the RPC endpoint of the Container Credential Guard instance. + RpcEndpoint string `json:"RpcEndpoint,omitempty"` + + // Transport used for the configured Container Credential Guard instance. + Transport string `json:"Transport,omitempty"` + + // Credential spec used for the configured Container Credential Guard instance. + CredentialSpec string `json:"CredentialSpec,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_memory_information.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_memory_information.go new file mode 100644 index 000000000..754797e21 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_memory_information.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// memory usage as viewed from within the container +type ContainerMemoryInformation struct { + + TotalPhysicalBytes int32 `json:"TotalPhysicalBytes,omitempty"` + + TotalUsage int32 `json:"TotalUsage,omitempty"` + + CommittedBytes int32 `json:"CommittedBytes,omitempty"` + + SharedCommittedBytes int32 `json:"SharedCommittedBytes,omitempty"` + + CommitLimitBytes int32 `json:"CommitLimitBytes,omitempty"` + + PeakCommitmentBytes int32 `json:"PeakCommitmentBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go new file mode 100644 index 000000000..ca319bbbc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Device struct { + + // The interface class guid of the device to assign to container. 
+ InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go new file mode 100644 index 000000000..b2191c571 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go @@ -0,0 +1,43 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Devices struct { + + ComPorts map[string]ComPort `json:"ComPorts,omitempty"` + + Scsi map[string]Scsi `json:"Scsi,omitempty"` + + VirtualPMem *VirtualPMemController `json:"VirtualPMem,omitempty"` + + NetworkAdapters map[string]NetworkAdapter `json:"NetworkAdapters,omitempty"` + + VideoMonitor *VideoMonitor `json:"VideoMonitor,omitempty"` + + Keyboard *Keyboard `json:"Keyboard,omitempty"` + + Mouse *Mouse `json:"Mouse,omitempty"` + + HvSocket *HvSocket2 `json:"HvSocket,omitempty"` + + EnhancedModeVideo *EnhancedModeVideo `json:"EnhancedModeVideo,omitempty"` + + GuestCrashReporting *GuestCrashReporting `json:"GuestCrashReporting,omitempty"` + + VirtualSmb *VirtualSmb `json:"VirtualSmb,omitempty"` + + Plan9 *Plan9 `json:"Plan9,omitempty"` + + Battery *Battery `json:"Battery,omitempty"` + + FlexibleIov map[string]FlexibleIoDevice `json:"FlexibleIov,omitempty"` + + SharedMemory *SharedMemoryConfiguration `json:"SharedMemory,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/enhanced_mode_video.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/enhanced_mode_video.go new file mode 100644 index 000000000..4fe592f71 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/enhanced_mode_video.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type EnhancedModeVideo struct { + + ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/flexible_io_device.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/flexible_io_device.go new file mode 100644 index 000000000..51011afe4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/flexible_io_device.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type FlexibleIoDevice struct { + + EmulatorId string `json:"EmulatorId,omitempty"` + + HostingModel string `json:"HostingModel,omitempty"` + + Configuration []string `json:"Configuration,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection.go new file mode 100644 index 000000000..7db29495b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 
2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestConnection struct { + + // Use Vsock rather than Hyper-V sockets to communicate with the guest service. + UseVsock bool `json:"UseVsock,omitempty"` + + // Don't disconnect the guest connection when pausing the virtual machine. + UseConnectedSuspend bool `json:"UseConnectedSuspend,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection_info.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection_info.go new file mode 100644 index 000000000..8a369bab7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection_info.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Information about the guest. +type GuestConnectionInfo struct { + + // Each schema version x.y stands for the range of versions a.b where a==x and b<=y. This list comes from the SupportedSchemaVersions field in GcsCapabilities. + SupportedSchemaVersions []Version `json:"SupportedSchemaVersions,omitempty"` + + ProtocolVersion int32 `json:"ProtocolVersion,omitempty"` + + GuestDefinedCapabilities *interface{} `json:"GuestDefinedCapabilities,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_crash_reporting.go new file mode 100644 index 000000000..c5fa76735 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_crash_reporting.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestCrashReporting struct { + + WindowsCrashSettings *WindowsCrashReporting `json:"WindowsCrashSettings,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_os.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_os.go new file mode 100644 index 000000000..c708fc7c3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_os.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestOs struct { + + HostName string `json:"HostName,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_state.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_state.go new file mode 100644 index 000000000..ef1eec886 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_state.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestState struct { + + // The path to an existing file uses for persistent guest state storage. 
An empty string indicates the system should initialize new transient, in-memory guest state. + GuestStateFilePath string `json:"GuestStateFilePath,omitempty"` + + // The path to an existing file for persistent runtime state storage. An empty string indicates the system should initialize new transient, in-memory runtime state. + RuntimeStateFilePath string `json:"RuntimeStateFilePath,omitempty"` + + // If true, the guest state and runtime state files will be used as templates to populate transient, in-memory state instead of using the files as persistent backing store. + ForceTransientState bool `json:"ForceTransientState,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hosted_system.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hosted_system.go new file mode 100644 index 000000000..0797584c5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hosted_system.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HostedSystem struct { + + SchemaVersion *Version `json:"SchemaVersion,omitempty"` + + Container *Container `json:"Container,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket.go new file mode 100644 index 000000000..ef9ffb8dd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HvSocket struct { + + Config *HvSocketSystemConfig `json:"Config,omitempty"` + + EnablePowerShellDirect bool `json:"EnablePowerShellDirect,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_2.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_2.go new file mode 100644 index 000000000..a19ba15c1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_2.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// HvSocket configuration for a VM +type HvSocket2 struct { + + HvSocketConfig *HvSocketSystemConfig `json:"HvSocketConfig,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go new file mode 100644 index 000000000..a848e91e6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HvSocketServiceConfig struct { + + // SDDL string that HvSocket will check before allowing a host process to bind to this specific service. 
If not specified, defaults to the system DefaultBindSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. + BindSecurityDescriptor string `json:"BindSecurityDescriptor,omitempty"` + + // SDDL string that HvSocket will check before allowing a host process to connect to this specific service. If not specified, defaults to the system DefaultConnectSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. + ConnectSecurityDescriptor string `json:"ConnectSecurityDescriptor,omitempty"` + + // If true, HvSocket will process wildcard binds for this service/system combination. Wildcard binds are secured in the registry at SOFTWARE/Microsoft/Windows NT/CurrentVersion/Virtualization/HvSocket/WildcardDescriptors + AllowWildcardBinds bool `json:"AllowWildcardBinds,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_system_config.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_system_config.go new file mode 100644 index 000000000..69f4f9d39 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_system_config.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// This is the HCS Schema version of the HvSocket configuration. The VMWP version is located in Config.Devices.IC in V1. +type HvSocketSystemConfig struct { + + // SDDL string that HvSocket will check before allowing a host process to bind to an unlisted service for this specific container/VM (not wildcard binds). + DefaultBindSecurityDescriptor string `json:"DefaultBindSecurityDescriptor,omitempty"` + + // SDDL string that HvSocket will check before allowing a host process to connect to an unlisted service in the VM/container. 
+ DefaultConnectSecurityDescriptor string `json:"DefaultConnectSecurityDescriptor,omitempty"` + + ServiceTable map[string]HvSocketServiceConfig `json:"ServiceTable,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/keyboard.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/keyboard.go new file mode 100644 index 000000000..3d3fa3b1c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/keyboard.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Keyboard struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/layer.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/layer.go new file mode 100644 index 000000000..b63b8ef12 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/layer.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Layer struct { + + Id string `json:"Id,omitempty"` + + Path string `json:"Path,omitempty"` + + PathType string `json:"PathType,omitempty"` + + // Unspecified defaults to Enabled + Cache string `json:"Cache,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/linux_kernel_direct.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/linux_kernel_direct.go new file mode 100644 index 000000000..0ab6c280f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/linux_kernel_direct.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.2 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type LinuxKernelDirect struct { + KernelFilePath string `json:"KernelFilePath,omitempty"` + + InitRdPath string `json:"InitRdPath,omitempty"` + + KernelCmdLine string `json:"KernelCmdLine,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_directory.go new file mode 100644 index 000000000..a823a6d3b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_directory.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MappedDirectory struct { + + HostPath string `json:"HostPath,omitempty"` + + HostPathType string `json:"HostPathType,omitempty"` + + ContainerPath string `json:"ContainerPath,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_pipe.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_pipe.go new file mode 100644 index 000000000..2d1d2604a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_pipe.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen 
https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MappedPipe struct { + + ContainerPipeName string `json:"ContainerPipeName,omitempty"` + + HostPath string `json:"HostPath,omitempty"` + + HostPathType string `json:"HostPathType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go new file mode 100644 index 000000000..e1d135a3a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Memory struct { + + SizeInMB int32 `json:"SizeInMB,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go new file mode 100644 index 000000000..27d0b8c48 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Memory2 struct { + SizeInMB int32 `json:"SizeInMB,omitempty"` + + AllowOvercommit bool `json:"AllowOvercommit,omitempty"` + + EnableHotHint bool `json:"EnableHotHint,omitempty"` + + EnableColdHint bool `json:"EnableColdHint,omitempty"` + + EnableEpf bool `json:"EnableEpf,omitempty"` + + // EnableDeferredCommit is private in the schema. If regenerated need to add back. 
+ EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_information_for_vm.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_information_for_vm.go new file mode 100644 index 000000000..bdd87dffd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_information_for_vm.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MemoryInformationForVm struct { + + VirtualNodeCount int32 `json:"VirtualNodeCount,omitempty"` + + VirtualMachineMemory *VmMemory `json:"VirtualMachineMemory,omitempty"` + + VirtualNodes []VirtualNodeInfo `json:"VirtualNodes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_stats.go new file mode 100644 index 000000000..6214970f6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_stats.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Memory runtime statistics +type MemoryStats struct { + + MemoryUsageCommitBytes int32 `json:"MemoryUsageCommitBytes,omitempty"` + + MemoryUsageCommitPeakBytes int32 `json:"MemoryUsageCommitPeakBytes,omitempty"` + + MemoryUsagePrivateWorkingSetBytes int32 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/modify_setting_request.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/modify_setting_request.go new file mode 100644 index 000000000..d29455a3e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/modify_setting_request.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ModifySettingRequest struct { + ResourcePath string `json:"ResourcePath,omitempty"` + + RequestType string `json:"RequestType,omitempty"` + + Settings interface{} `json:"Settings,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated + + GuestRequest interface{} `json:"GuestRequest,omitempty"` // NOTE: Swagger generated as *interface{}. 
Locally updated +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/mouse.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/mouse.go new file mode 100644 index 000000000..ccf8b938f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/mouse.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Mouse struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go new file mode 100644 index 000000000..c586f66c2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type NetworkAdapter struct { + + EndpointId string `json:"EndpointId,omitempty"` + + MacAddress string `json:"MacAddress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/networking.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/networking.go new file mode 100644 index 000000000..12c47827c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/networking.go @@ -0,0 +1,24 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Networking struct { + + AllowUnqualifiedDnsQuery bool `json:"AllowUnqualifiedDnsQuery,omitempty"` + + DnsSearchList string `json:"DnsSearchList,omitempty"` + + NetworkSharedContainerName string `json:"NetworkSharedContainerName,omitempty"` + + // Guid in windows; string in linux + Namespace string `json:"Namespace,omitempty"` + + NetworkAdapters []string `json:"NetworkAdapters,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_notification.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_notification.go new file mode 100644 index 000000000..1cd70d179 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_notification.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Notification data that is indicated to components running in the Virtual Machine. 
+type PauseNotification struct { + + Reason string `json:"Reason,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_options.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_options.go new file mode 100644 index 000000000..780a5cae2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_options.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Options for HcsPauseComputeSystem +type PauseOptions struct { + + SuspensionLevel string `json:"SuspensionLevel,omitempty"` + + HostedNotification *PauseNotification `json:"HostedNotification,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9.go new file mode 100644 index 000000000..705c677e1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Plan9 struct { + + Shares []Plan9Share `json:"Shares,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go new file mode 100644 index 000000000..eb171817a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go @@ -0,0 +1,33 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Plan9Share struct { + + Name string `json:"Name,omitempty"` + + // The name by which the guest operation system can access this share, via the aname parameter in the Plan9 protocol. + AccessName string `json:"AccessName,omitempty"` + + Path string `json:"Path,omitempty"` + + Port int32 `json:"Port,omitempty"` + + // Flags are marked private. 
Until they are exported correctly + // + // ReadOnly 0x00000001 + // LinuxMetadata 0x00000004 + // CaseSensitive 0x00000008 + Flags int32 `json:"Flags,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_details.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_details.go new file mode 100644 index 000000000..63e0b7f8f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_details.go @@ -0,0 +1,34 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "time" +) + +// Information about a process running in a container +type ProcessDetails struct { + + ProcessId int32 `json:"ProcessId,omitempty"` + + ImageName string `json:"ImageName,omitempty"` + + CreateTimestamp time.Time `json:"CreateTimestamp,omitempty"` + + UserTime100ns int32 `json:"UserTime100ns,omitempty"` + + KernelTime100ns int32 `json:"KernelTime100ns,omitempty"` + + MemoryCommitBytes int32 `json:"MemoryCommitBytes,omitempty"` + + MemoryWorkingSetPrivateBytes int32 `json:"MemoryWorkingSetPrivateBytes,omitempty"` + + MemoryWorkingSetSharedBytes int32 `json:"MemoryWorkingSetSharedBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_modify_request.go new file mode 100644 index 000000000..29bc2e3d0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_modify_request.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Passed to HcsRpc_ModifyProcess +type ProcessModifyRequest struct { + + Operation string `json:"Operation,omitempty"` + + ConsoleSize *ConsoleSize `json:"ConsoleSize,omitempty"` + + CloseHandle *CloseHandle `json:"CloseHandle,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_parameters.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_parameters.go new file mode 100644 index 000000000..470c55734 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_parameters.go @@ -0,0 +1,47 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ProcessParameters struct { + + ApplicationName string `json:"ApplicationName,omitempty"` + + CommandLine string `json:"CommandLine,omitempty"` + + // optional alternative to CommandLine, currently only supported by Linux GCS + CommandArgs []string `json:"CommandArgs,omitempty"` + + User string `json:"User,omitempty"` + + WorkingDirectory string `json:"WorkingDirectory,omitempty"` + + Environment map[string]string `json:"Environment,omitempty"` + + // if set, will run as low-privilege process + RestrictedToken bool `json:"RestrictedToken,omitempty"` + + // if set, ignore StdErrPipe + EmulateConsole bool 
`json:"EmulateConsole,omitempty"` + + CreateStdInPipe bool `json:"CreateStdInPipe,omitempty"` + + CreateStdOutPipe bool `json:"CreateStdOutPipe,omitempty"` + + CreateStdErrPipe bool `json:"CreateStdErrPipe,omitempty"` + + // height then width + ConsoleSize []int32 `json:"ConsoleSize,omitempty"` + + // if set, find an existing session for the user and create the process in it + UseExistingLogin bool `json:"UseExistingLogin,omitempty"` + + // if set, use the legacy console instead of conhost + UseLegacyConsole bool `json:"UseLegacyConsole,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_status.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_status.go new file mode 100644 index 000000000..20793d150 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_status.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Status of a process running in a container +type ProcessStatus struct { + + ProcessId int32 `json:"ProcessId,omitempty"` + + Exited bool `json:"Exited,omitempty"` + + ExitCode int32 `json:"ExitCode,omitempty"` + + LastWaitResult int32 `json:"LastWaitResult,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor.go new file mode 100644 index 000000000..7a60b0245 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Processor struct { + + Count int32 `json:"Count,omitempty"` + + Maximum int32 `json:"Maximum,omitempty"` + + Weight int32 `json:"Weight,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_2.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_2.go new file mode 100644 index 000000000..40d3e7356 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_2.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Processor2 struct { + + Count int32 `json:"Count,omitempty"` + + Limit int32 `json:"Limit,omitempty"` + + Weight int32 `json:"Weight,omitempty"` + + ExposeVirtualizationExtensions bool `json:"ExposeVirtualizationExtensions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_stats.go new file mode 100644 index 000000000..9d3b77e57 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_stats.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// CPU runtime 
statistics +type ProcessorStats struct { + + TotalRuntime100ns int32 `json:"TotalRuntime100ns,omitempty"` + + RuntimeUser100ns int32 `json:"RuntimeUser100ns,omitempty"` + + RuntimeKernel100ns int32 `json:"RuntimeKernel100ns,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/properties.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/properties.go new file mode 100644 index 000000000..6db2a48f6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/properties.go @@ -0,0 +1,47 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Properties struct { + + Id string `json:"Id,omitempty"` + + SystemType string `json:"SystemType,omitempty"` + + RuntimeOsType string `json:"RuntimeOsType,omitempty"` + + Name string `json:"Name,omitempty"` + + Owner string `json:"Owner,omitempty"` + + RuntimeId string `json:"RuntimeId,omitempty"` + + RuntimeTemplateId string `json:"RuntimeTemplateId,omitempty"` + + State string `json:"State,omitempty"` + + Stopped bool `json:"Stopped,omitempty"` + + ExitType string `json:"ExitType,omitempty"` + + Memory *MemoryInformationForVm `json:"Memory,omitempty"` + + Statistics *Statistics `json:"Statistics,omitempty"` + + ProcessList []ProcessDetails `json:"ProcessList,omitempty"` + + TerminateOnLastHandleClosed bool `json:"TerminateOnLastHandleClosed,omitempty"` + + HostingSystemId string `json:"HostingSystemId,omitempty"` + + SharedMemoryRegionInfo []SharedMemoryRegionInfo `json:"SharedMemoryRegionInfo,omitempty"` + + GuestConnectionInfo *GuestConnectionInfo `json:"GuestConnectionInfo,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/property_query.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/property_query.go new file mode 100644 index 000000000..22b92ffdf --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/property_query.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// By default the basic properties will be returned. This query provides a way to request specific properties. 
+type PropertyQuery struct { + + PropertyTypes []string `json:"PropertyTypes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/rdp_connection_options.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/rdp_connection_options.go new file mode 100644 index 000000000..97e453128 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/rdp_connection_options.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RdpConnectionOptions struct { + + AccessSids []string `json:"AccessSids,omitempty"` + + NamedPipe string `json:"NamedPipe,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_changes.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_changes.go new file mode 100644 index 000000000..fa574ccc8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_changes.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryChanges struct { + + AddValues []RegistryValue `json:"AddValues,omitempty"` + + DeleteKeys []RegistryKey `json:"DeleteKeys,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_key.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_key.go new file mode 100644 index 000000000..fab03bc60 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_key.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryKey struct { + + Hive string `json:"Hive,omitempty"` + + Name string `json:"Name,omitempty"` + + Volatile bool `json:"Volatile,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_value.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_value.go new file mode 100644 index 000000000..1589f4841 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_value.go @@ -0,0 +1,31 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryValue struct { + + Key *RegistryKey `json:"Key,omitempty"` + + Name string `json:"Name,omitempty"` + + Type_ string `json:"Type,omitempty"` + + // One and only one value type must be set. 
+ StringValue string `json:"StringValue,omitempty"` + + BinaryValue string `json:"BinaryValue,omitempty"` + + DWordValue int32 `json:"DWordValue,omitempty"` + + QWordValue int32 `json:"QWordValue,omitempty"` + + // Only used if RegistryValueType is CustomType The data is in BinaryValue + CustomType int32 `json:"CustomType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/restore_state.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/restore_state.go new file mode 100644 index 000000000..778ff5873 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/restore_state.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RestoreState struct { + + // The path to the save state file to restore the system from. + SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` + + // The ID of the template system to clone this new system off of. An empty string indicates the system should not be cloned from a template. + TemplateSystemId string `json:"TemplateSystemId,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/save_options.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/save_options.go new file mode 100644 index 000000000..e55fa1d98 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/save_options.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SaveOptions struct { + + // The type of save operation to be performed. + SaveType string `json:"SaveType,omitempty"` + + // The path to the file that will container the saved state. + SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/scsi.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/scsi.go new file mode 100644 index 000000000..bf253a470 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/scsi.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Scsi struct { + + // Map of attachments, where the key is the integer LUN number on the controller. 
+ Attachments map[string]Attachment `json:"Attachments,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_configuration.go new file mode 100644 index 000000000..bd573f6cd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_configuration.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryConfiguration struct { + + Regions []SharedMemoryRegion `json:"Regions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region.go new file mode 100644 index 000000000..a57b2cba7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region.go @@ -0,0 +1,23 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryRegion struct { + + SectionName string `json:"SectionName,omitempty"` + + StartOffset int32 `json:"StartOffset,omitempty"` + + Length int32 `json:"Length,omitempty"` + + AllowGuestWrite bool `json:"AllowGuestWrite,omitempty"` + + HiddenFromGuest bool `json:"HiddenFromGuest,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region_info.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region_info.go new file mode 100644 index 000000000..d9a50cc7d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region_info.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryRegionInfo struct { + + SectionName string `json:"SectionName,omitempty"` + + GuestPhysicalAddress int32 `json:"GuestPhysicalAddress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/silo_properties.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/silo_properties.go new file mode 100644 index 000000000..599c06e8a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/silo_properties.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Silo job information +type SiloProperties struct { + + Enabled bool `json:"Enabled,omitempty"` + + JobName string `json:"JobName,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/statistics.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/statistics.go new file mode 100644 index 000000000..5cb3ed93b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/statistics.go @@ -0,0 +1,30 @@ +/* + * HCS API + * + * No description provided 
(generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "time" +) + +// Runtime statistics for a container +type Statistics struct { + + Timestamp time.Time `json:"Timestamp,omitempty"` + + ContainerStartTime time.Time `json:"ContainerStartTime,omitempty"` + + Uptime100ns int32 `json:"Uptime100ns,omitempty"` + + Processor *ProcessorStats `json:"Processor,omitempty"` + + Memory *MemoryStats `json:"Memory,omitempty"` + + Storage *StorageStats `json:"Storage,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage.go new file mode 100644 index 000000000..2627af913 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Storage struct { + + // List of layers that describe the parent hierarchy for a container's storage. These layers combined together, presented as a disposable and/or committable working storage, are used by the container to record all changes done to the parent layers. + Layers []Layer `json:"Layers,omitempty"` + + // Path that points to the scratch space of a container, where parent layers are combined together to present a new disposable and/or committable layer with the changes done during its runtime. + Path string `json:"Path,omitempty"` + + QoS *StorageQoS `json:"QoS,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_qo_s.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_qo_s.go new file mode 100644 index 000000000..8c5255df1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_qo_s.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type StorageQoS struct { + + IopsMaximum int32 `json:"IopsMaximum,omitempty"` + + BandwidthMaximum int32 `json:"BandwidthMaximum,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_stats.go new file mode 100644 index 000000000..198ea57d7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_stats.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Storage runtime statistics +type StorageStats struct { + + ReadCountNormalized int32 `json:"ReadCountNormalized,omitempty"` + + ReadSizeBytes int32 `json:"ReadSizeBytes,omitempty"` + + WriteCountNormalized int32 `json:"WriteCountNormalized,omitempty"` + + WriteSizeBytes int32 `json:"WriteSizeBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/topology.go 
b/vendor/github.com/Microsoft/hcsshim/internal/schema2/topology.go new file mode 100644 index 000000000..af2e3c823 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/topology.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Topology struct { + + Memory *Memory2 `json:"Memory,omitempty"` + + Processor *Processor2 `json:"Processor,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi.go new file mode 100644 index 000000000..ba91178f9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Uefi struct { + + EnableDebugger bool `json:"EnableDebugger,omitempty"` + + SecureBootTemplateId string `json:"SecureBootTemplateId,omitempty"` + + BootThis *UefiBootEntry `json:"BootThis,omitempty"` + + Console string `json:"Console,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi_boot_entry.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi_boot_entry.go new file mode 100644 index 000000000..6620fb2bc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi_boot_entry.go @@ -0,0 +1,23 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type UefiBootEntry struct { + + DeviceType string `json:"DeviceType,omitempty"` + + DevicePath string `json:"DevicePath,omitempty"` + + DiskNumber int32 `json:"DiskNumber,omitempty"` + + OptionalData string `json:"OptionalData,omitempty"` + + VmbFsRootPath string `json:"VmbFsRootPath,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/version.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/version.go new file mode 100644 index 000000000..62c0e4d12 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/version.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Version struct { + + Major int32 `json:"Major,omitempty"` + + Minor int32 `json:"Minor,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/video_monitor.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/video_monitor.go new file mode 100644 index 000000000..0958e5606 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/video_monitor.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type 
VideoMonitor struct { + + HorizontalResolution int32 `json:"HorizontalResolution,omitempty"` + + VerticalResolution int32 `json:"VerticalResolution,omitempty"` + + ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go new file mode 100644 index 000000000..2d22b1bcb --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go @@ -0,0 +1,32 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualMachine struct { + + // StopOnReset is private in the schema. If regenerated need to put back. + StopOnReset bool `json:"StopOnReset,omitempty"` + + Chipset *Chipset `json:"Chipset,omitempty"` + + ComputeTopology *Topology `json:"ComputeTopology,omitempty"` + + Devices *Devices `json:"Devices,omitempty"` + + GuestState *GuestState `json:"GuestState,omitempty"` + + RestoreState *RestoreState `json:"RestoreState,omitempty"` + + RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` + + StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` + + GuestConnection *GuestConnection `json:"GuestConnection,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_node_info.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_node_info.go new file mode 100644 index 000000000..48402d8ec --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_node_info.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualNodeInfo struct { + + VirtualNodeIndex int32 `json:"VirtualNodeIndex,omitempty"` + + PhysicalNodeNumber int32 `json:"PhysicalNodeNumber,omitempty"` + + VirtualProcessorCount int32 `json:"VirtualProcessorCount,omitempty"` + + MemoryUsageInPages int32 `json:"MemoryUsageInPages,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_controller.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_controller.go new file mode 100644 index 000000000..f5b7f3e38 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_controller.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemController struct { + Devices map[string]VirtualPMemDevice `json:"Devices,omitempty"` + + MaximumCount uint32 `json:"MaximumCount,omitempty"` + + MaximumSizeBytes uint64 `json:"MaximumSizeBytes,omitempty"` + + Backing string `json:"Backing,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_device.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_device.go new file mode 100644 index 000000000..47714444a --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_device.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemDevice struct { + + HostPath string `json:"HostPath,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb.go new file mode 100644 index 000000000..76131b3a7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmb struct { + + Shares []VirtualSmbShare `json:"Shares,omitempty"` + + DirectFileMappingInMB int64 `json:"DirectFileMappingInMB,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share.go new file mode 100644 index 000000000..b50098a42 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmbShare struct { + + Name string `json:"Name,omitempty"` + + Path string `json:"Path,omitempty"` + + AllowedFiles []string `json:"AllowedFiles,omitempty"` + + Options *VirtualSmbShareOptions `json:"Options,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share_options.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share_options.go new file mode 100644 index 000000000..c1894279d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share_options.go @@ -0,0 +1,63 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmbShareOptions struct { + + ReadOnly bool `json:"ReadOnly,omitempty"` + + // convert exclusive access to shared read access + ShareRead bool `json:"ShareRead,omitempty"` + + // all opens will use cached I/O + CacheIo bool `json:"CacheIo,omitempty"` + + // disable oplock support + NoOplocks bool `json:"NoOplocks,omitempty"` + + // Acquire the backup privilege when attempting to open + TakeBackupPrivilege bool `json:"TakeBackupPrivilege,omitempty"` + + // Use the identity of the share root when opening + UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` + + // disable Direct Mapping + NoDirectmap bool `json:"NoDirectmap,omitempty"` + + // disable Byterange locks + NoLocks bool `json:"NoLocks,omitempty"` + + // disable Directory CHange Notifications + NoDirnotify bool `json:"NoDirnotify,omitempty"` + + // 
share is used for VM shared memory + VmSharedMemory bool `json:"VmSharedMemory,omitempty"` + + // allow access only to the files specified in AllowedFiles + RestrictFileAccess bool `json:"RestrictFileAccess,omitempty"` + + // disable all oplocks except Level II + ForceLevelIIOplocks bool `json:"ForceLevelIIOplocks,omitempty"` + + // Allow the host to reparse this base layer + ReparseBaseLayer bool `json:"ReparseBaseLayer,omitempty"` + + // Enable pseudo-oplocks + PseudoOplocks bool `json:"PseudoOplocks,omitempty"` + + // All opens will use non-cached IO + NonCacheIo bool `json:"NonCacheIo,omitempty"` + + // Enable pseudo directory change notifications + PseudoDirnotify bool `json:"PseudoDirnotify,omitempty"` + + // Block directory enumeration, renames, and deletes. + SingleFileMapping bool `json:"SingleFileMapping,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_memory.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_memory.go new file mode 100644 index 000000000..39f628667 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_memory.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VmMemory struct { + + AvailableMemory int32 `json:"AvailableMemory,omitempty"` + + AvailableMemoryBuffer int32 `json:"AvailableMemoryBuffer,omitempty"` + + ReservedMemory int32 `json:"ReservedMemory,omitempty"` + + AssignedMemory int32 `json:"AssignedMemory,omitempty"` + + SlpActive bool `json:"SlpActive,omitempty"` + + BalancingEnabled bool `json:"BalancingEnabled,omitempty"` + + DmOperationInProgress bool `json:"DmOperationInProgress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/windows_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/windows_crash_reporting.go new file mode 100644 index 000000000..cf632bbc8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/windows_crash_reporting.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type WindowsCrashReporting struct { + + DumpFileName string `json:"DumpFileName,omitempty"` + + MaxDumpSize int64 `json:"MaxDumpSize,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go index 3a0d4bc58..dcb919268 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go @@ -9,17 +9,24 @@ import ( // For a read/write layer, the mounted filesystem will appear as a volume on the // host, while a read-only layer is generally expected to be a no-op. // An activated layer must later be deactivated via DeactivateLayer. 
-func ActivateLayer(path string) error { - title := "hcsshim::ActivateLayer " - logrus.Debugf(title+"path %s", path) +func ActivateLayer(path string) (err error) { + title := "hcsshim::ActivateLayer" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() - err := activateLayer(&stdDriverInfo, path) + err = activateLayer(&stdDriverInfo, path) if err != nil { - err = hcserror.Errorf(err, title, "path=%s", path) - logrus.Error(err) - return err + return hcserror.New(err, title+" - failed", "") } - - logrus.Debugf(title+" - succeeded path=%s", path) return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go index d15817730..be2bc3fd6 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go @@ -7,17 +7,25 @@ import ( // CreateLayer creates a new, empty, read-only layer on the filesystem based on // the parent layer provided. -func CreateLayer(path, parent string) error { - title := "hcsshim::CreateLayer " - logrus.Debugf(title+"ID %s parent %s", path, parent) +func CreateLayer(path, parent string) (err error) { + title := "hcsshim::CreateLayer" + fields := logrus.Fields{ + "parent": parent, + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() - err := createLayer(&stdDriverInfo, path, parent) + err = createLayer(&stdDriverInfo, path, parent) if err != nil { - err = hcserror.Errorf(err, title, "path=%s parent=%s", path, parent) - logrus.Error(err) - return err + return hcserror.New(err, title+" - failed", "") } - - logrus.Debugf(title+"- succeeded path=%s parent=%s", path, parent) return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go index bf2fece19..7e3351289 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go @@ -9,9 +9,20 @@ import ( // This requires both the id of the direct parent layer, as well as the full list // of paths to all parent layers up to the base (and including the direct parent // whose id was provided). 
-func CreateScratchLayer(path string, parentLayerPaths []string) error { - title := "hcsshim::CreateScratchLayer " - logrus.Debugf(title+"path %s", path) +func CreateScratchLayer(path string, parentLayerPaths []string) (err error) { + title := "hcsshim::CreateScratchLayer" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() // Generate layer descriptors layers, err := layerPathsToDescriptors(parentLayerPaths) @@ -21,11 +32,7 @@ func CreateScratchLayer(path string, parentLayerPaths []string) error { err = createSandboxLayer(&stdDriverInfo, path, 0, layers) if err != nil { - err = hcserror.Errorf(err, title, "path=%s", path) - logrus.Error(err) - return err + return hcserror.New(err, title+" - failed", "") } - - logrus.Debugf(title+"- succeeded path=%s", path) return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go index b998f8a19..2dd5d5715 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go @@ -6,17 +6,24 @@ import ( ) // DeactivateLayer will dismount a layer that was mounted via ActivateLayer. -func DeactivateLayer(path string) error { - title := "hcsshim::DeactivateLayer " - logrus.Debugf(title+"path %s", path) +func DeactivateLayer(path string) (err error) { + title := "hcsshim::DeactivateLayer" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() - err := deactivateLayer(&stdDriverInfo, path) + err = deactivateLayer(&stdDriverInfo, path) if err != nil { - err = hcserror.Errorf(err, title, "path=%s", path) - logrus.Error(err) - return err + return hcserror.New(err, title+"- failed", "") } - - logrus.Debugf(title+"succeeded path=%s", path) return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go index dc14cecc4..4da690c20 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go @@ -7,17 +7,24 @@ import ( // DestroyLayer will remove the on-disk files representing the layer with the given // path, including that layer's containing folder, if any. 
-func DestroyLayer(path string) error { - title := "hcsshim::DestroyLayer " - logrus.Debugf(title+"path %s", path) +func DestroyLayer(path string) (err error) { + title := "hcsshim::DestroyLayer" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() - err := destroyLayer(&stdDriverInfo, path) + err = destroyLayer(&stdDriverInfo, path) if err != nil { - err = hcserror.Errorf(err, title, "path=%s", path) - logrus.Error(err) - return err + return hcserror.New(err, title+" - failed", "") } - - logrus.Debugf(title+"succeeded path=%s", path) return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go index 7832bb452..651676fb2 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go @@ -6,17 +6,25 @@ import ( ) // ExpandScratchSize expands the size of a layer to at least size bytes. -func ExpandScratchSize(path string, size uint64) error { - title := "hcsshim::ExpandScratchSize " - logrus.Debugf(title+"path=%s size=%d", path, size) +func ExpandScratchSize(path string, size uint64) (err error) { + title := "hcsshim::ExpandScratchSize" + fields := logrus.Fields{ + "path": path, + "size": size, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() - err := expandSandboxSize(&stdDriverInfo, path, size) + err = expandSandboxSize(&stdDriverInfo, path, size) if err != nil { - err = hcserror.Errorf(err, title, "path=%s size=%d", path, size) - logrus.Error(err) - return err + return hcserror.New(err, title+" - failed", "") } - - logrus.Debugf(title+"- succeeded path=%s size=%d", path, size) return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go index c6b3480ce..0425b3395 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go @@ -1,14 +1,11 @@ package wclayer import ( - "io" "io/ioutil" "os" - "syscall" "github.com/Microsoft/go-winio" "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/interop" "github.com/sirupsen/logrus" ) @@ -17,9 +14,21 @@ import ( // format includes any metadata required for later importing the layer (using // ImportLayer), and requires the full list of parent layer paths in order to // perform the export. 
-func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string) error { - title := "hcsshim::ExportLayer " - logrus.Debugf(title+"path %s folder %s", path, exportFolderPath) +func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string) (err error) { + title := "hcsshim::ExportLayer" + fields := logrus.Fields{ + "path": path, + "exportFolderPath": exportFolderPath, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() // Generate layer descriptors layers, err := layerPathsToDescriptors(parentLayerPaths) @@ -29,12 +38,8 @@ func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers) if err != nil { - err = hcserror.Errorf(err, title, "path=%s folder=%s", path, exportFolderPath) - logrus.Error(err) - return err + return hcserror.New(err, title+" - failed", "") } - - logrus.Debugf(title+"succeeded path=%s folder=%s", path, exportFolderPath) return nil } @@ -44,96 +49,20 @@ type LayerReader interface { Close() error } -// FilterLayerReader provides an interface for extracting the contents of an on-disk layer. -type FilterLayerReader struct { - context uintptr -} - -// Next reads the next available file from a layer, ensuring that parent directories are always read -// before child files and directories. -// -// Next returns the file's relative path, size, and basic file metadata. Read() should be used to -// extract a Win32 backup stream with the remainder of the metadata and the data. -func (r *FilterLayerReader) Next() (string, int64, *winio.FileBasicInfo, error) { - var fileNamep *uint16 - fileInfo := &winio.FileBasicInfo{} - var deleted uint32 - var fileSize int64 - err := exportLayerNext(r.context, &fileNamep, fileInfo, &fileSize, &deleted) - if err != nil { - if err == syscall.ERROR_NO_MORE_FILES { - err = io.EOF - } else { - err = hcserror.New(err, "ExportLayerNext", "") - } - return "", 0, nil, err - } - fileName := interop.ConvertAndFreeCoTaskMemString(fileNamep) - if deleted != 0 { - fileInfo = nil - } - if fileName[0] == '\\' { - fileName = fileName[1:] - } - return fileName, fileSize, fileInfo, nil -} - -// Read reads from the current file's Win32 backup stream. -func (r *FilterLayerReader) Read(b []byte) (int, error) { - var bytesRead uint32 - err := exportLayerRead(r.context, b, &bytesRead) - if err != nil { - return 0, hcserror.New(err, "ExportLayerRead", "") - } - if bytesRead == 0 { - return 0, io.EOF - } - return int(bytesRead), nil -} - -// Close frees resources associated with the layer reader. It will return an -// error if there was an error while reading the layer or of the layer was not -// completely read. -func (r *FilterLayerReader) Close() (err error) { - if r.context != 0 { - err = exportLayerEnd(r.context) - if err != nil { - err = hcserror.New(err, "ExportLayerEnd", "") - } - r.context = 0 - } - return -} - // NewLayerReader returns a new layer reader for reading the contents of an on-disk layer. // The caller must have taken the SeBackupPrivilege privilege // to call this and any methods on the resulting LayerReader. func NewLayerReader(path string, parentLayerPaths []string) (LayerReader, error) { - if procExportLayerBegin.Find() != nil { - // The new layer reader is not available on this Windows build. Fall back to the - // legacy export code path. 
- exportPath, err := ioutil.TempDir("", "hcs") - if err != nil { - return nil, err - } - err = ExportLayer(path, exportPath, parentLayerPaths) - if err != nil { - os.RemoveAll(exportPath) - return nil, err - } - return &legacyLayerReaderWrapper{newLegacyLayerReader(exportPath)}, nil - } - - layers, err := layerPathsToDescriptors(parentLayerPaths) + exportPath, err := ioutil.TempDir("", "hcs") if err != nil { return nil, err } - r := &FilterLayerReader{} - err = exportLayerBegin(&stdDriverInfo, path, layers, &r.context) + err = ExportLayer(path, exportPath, parentLayerPaths) if err != nil { - return nil, hcserror.New(err, "ExportLayerBegin", "") + os.RemoveAll(exportPath) + return nil, err } - return r, err + return &legacyLayerReaderWrapper{newLegacyLayerReader(exportPath)}, nil } type legacyLayerReaderWrapper struct { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go index 8c37549a0..d60b6ed53 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go @@ -11,20 +11,29 @@ import ( // the path at which that layer can be accessed. This path may be a volume path // if the layer is a mounted read-write layer, otherwise it is expected to be the // folder path at which the layer is stored. -func GetLayerMountPath(path string) (string, error) { - title := "hcsshim::GetLayerMountPath " - logrus.Debugf(title+"path %s", path) +func GetLayerMountPath(path string) (_ string, err error) { + title := "hcsshim::GetLayerMountPath" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() var mountPathLength uintptr mountPathLength = 0 // Call the procedure itself. - logrus.Debugf("Calling proc (1)") - err := getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) + logrus.WithFields(fields).Debug("Calling proc (1)") + err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) if err != nil { - err = hcserror.Errorf(err, title, "(first call) path=%s", path) - logrus.Error(err) - return "", err + return "", hcserror.New(err, title+" - failed", "(first call)") } // Allocate a mount path of the returned length. 
@@ -35,15 +44,13 @@ func GetLayerMountPath(path string) (string, error) { mountPathp[0] = 0 // Call the procedure again - logrus.Debugf("Calling proc (2)") + logrus.WithFields(fields).Debug("Calling proc (2)") err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0]) if err != nil { - err = hcserror.Errorf(err, title, "(second call) path=%s", path) - logrus.Error(err) - return "", err + return "", hcserror.New(err, title+" - failed", "(second call)") } mountPath := syscall.UTF16ToString(mountPathp[0:]) - logrus.Debugf(title+"succeeded path=%s mountPath=%s", path, mountPath) + fields["mountPath"] = mountPath return mountPath, nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go index 10899c68a..dbd83ef2b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go @@ -10,17 +10,20 @@ import ( // image store and return descriptive info about those images for the purpose // of registering them with the graphdriver, graph, and tagstore. func GetSharedBaseImages() (imageData string, err error) { - title := "hcsshim::GetSharedBaseImages " + title := "hcsshim::GetSharedBaseImages" + logrus.Debug(title) + defer func() { + if err != nil { + logrus.WithError(err).Error(err) + } else { + logrus.WithField("imageData", imageData).Debug(title + " - succeeded") + } + }() - logrus.Debugf("Calling proc") var buffer *uint16 err = getBaseImages(&buffer) if err != nil { - err = hcserror.New(err, title, "") - logrus.Error(err) - return + return "", hcserror.New(err, title+" - failed", "") } - imageData = interop.ConvertAndFreeCoTaskMemString(buffer) - logrus.Debugf(title+" - succeeded output=%s", imageData) - return + return interop.ConvertAndFreeCoTaskMemString(buffer), nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go index d86e67827..05735df6c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go @@ -1,24 +1,30 @@ package wclayer import ( - "fmt" - "github.com/Microsoft/hcsshim/internal/hcserror" "github.com/sirupsen/logrus" ) // GrantVmAccess adds access to a file for a given VM -func GrantVmAccess(vmid string, filepath string) error { - title := fmt.Sprintf("hcsshim::GrantVmAccess id:%s path:%s ", vmid, filepath) - logrus.Debugf(title) +func GrantVmAccess(vmid string, filepath string) (err error) { + title := "hcsshim::GrantVmAccess" + fields := logrus.Fields{ + "vm-id": vmid, + "path": filepath, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() - err := grantVmAccess(vmid, filepath) + err = grantVmAccess(vmid, filepath) if err != nil { - err = hcserror.Errorf(err, title, "path=%s", filepath) - logrus.Error(err) - return err + return hcserror.New(err, title+" - failed", "") } - - logrus.Debugf(title + " - succeeded") return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go index c978450f8..76a804f2a 100644 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go @@ -1,7 +1,6 @@ package wclayer import ( - "errors" "io/ioutil" "os" "path/filepath" @@ -16,9 +15,21 @@ import ( // that into a layer with the id layerId. Note that in order to correctly populate // the layer and interperet the transport format, all parent layers must already // be present on the system at the paths provided in parentLayerPaths. -func ImportLayer(path string, importFolderPath string, parentLayerPaths []string) error { - title := "hcsshim::ImportLayer " - logrus.Debugf(title+"path %s folder %s", path, importFolderPath) +func ImportLayer(path string, importFolderPath string, parentLayerPaths []string) (err error) { + title := "hcsshim::ImportLayer" + fields := logrus.Fields{ + "path": path, + "importFolderPath": importFolderPath, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() // Generate layer descriptors layers, err := layerPathsToDescriptors(parentLayerPaths) @@ -28,12 +39,8 @@ func ImportLayer(path string, importFolderPath string, parentLayerPaths []string err = importLayer(&stdDriverInfo, path, importFolderPath, layers) if err != nil { - err = hcserror.Errorf(err, title, "path=%s folder=%s", path, importFolderPath) - logrus.Error(err) - return err + return hcserror.New(err, title+" - failed", "") } - - logrus.Debugf(title+"succeeded path=%s folder=%s", path, importFolderPath) return nil } @@ -52,69 +59,6 @@ type LayerWriter interface { Close() error } -// FilterLayerWriter provides an interface to write the contents of a layer to the file system. -type FilterLayerWriter struct { - context uintptr -} - -// Add adds a file or directory to the layer. The file's parent directory must have already been added. -// -// name contains the file's relative path. fileInfo contains file times and file attributes; the rest -// of the file metadata and the file data must be written as a Win32 backup stream to the Write() method. -// winio.BackupStreamWriter can be used to facilitate this. -func (w *FilterLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error { - if name[0] != '\\' { - name = `\` + name - } - err := importLayerNext(w.context, name, fileInfo) - if err != nil { - return hcserror.New(err, "ImportLayerNext", "") - } - return nil -} - -// AddLink adds a hard link to the layer. The target of the link must have already been added. -func (w *FilterLayerWriter) AddLink(name string, target string) error { - return errors.New("hard links not yet supported") -} - -// Remove removes a file from the layer. The file must have been present in the parent layer. -// -// name contains the file's relative path. -func (w *FilterLayerWriter) Remove(name string) error { - if name[0] != '\\' { - name = `\` + name - } - err := importLayerNext(w.context, name, nil) - if err != nil { - return hcserror.New(err, "ImportLayerNext", "") - } - return nil -} - -// Write writes more backup stream data to the current file. -func (w *FilterLayerWriter) Write(b []byte) (int, error) { - err := importLayerWrite(w.context, b) - if err != nil { - err = hcserror.New(err, "ImportLayerWrite", "") - return 0, err - } - return len(b), err -} - -// Close completes the layer write operation. The error must be checked to ensure that the -// operation was successful. 
-func (w *FilterLayerWriter) Close() (err error) { - if w.context != 0 { - err = importLayerEnd(w.context) - if err != nil { - err = hcserror.New(err, "ImportLayerEnd", "") - } - w.context = 0 - } - return -} - type legacyLayerWriterWrapper struct { *legacyLayerWriter path string @@ -175,32 +119,17 @@ func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error) }, nil } - if procImportLayerBegin.Find() != nil { - // The new layer reader is not available on this Windows build. Fall back to the - // legacy export code path. - importPath, err := ioutil.TempDir("", "hcs") - if err != nil { - return nil, err - } - w, err := newLegacyLayerWriter(importPath, parentLayerPaths, path) - if err != nil { - return nil, err - } - return &legacyLayerWriterWrapper{ - legacyLayerWriter: w, - path: importPath, - parentLayerPaths: parentLayerPaths, - }, nil - } - layers, err := layerPathsToDescriptors(parentLayerPaths) + importPath, err := ioutil.TempDir("", "hcs") if err != nil { return nil, err } - - w := &FilterLayerWriter{} - err = importLayerBegin(&stdDriverInfo, path, layers, &w.context) + w, err := newLegacyLayerWriter(importPath, parentLayerPaths, path) if err != nil { - return nil, hcserror.New(err, "ImportLayerStart", "") + return nil, err } - return w, nil + return &legacyLayerWriterWrapper{ + legacyLayerWriter: w, + path: importPath, + parentLayerPaths: parentLayerPaths, + }, nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go index 71287ff8a..258167a57 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go @@ -7,19 +7,27 @@ import ( // LayerExists will return true if a layer with the given id exists and is known // to the system. -func LayerExists(path string) (bool, error) { - title := "hcsshim::LayerExists " - logrus.Debugf(title+"path %s", path) +func LayerExists(path string) (_ bool, err error) { + title := "hcsshim::LayerExists" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() // Call the procedure itself. 
var exists uint32 - err := layerExists(&stdDriverInfo, path, &exists) + err = layerExists(&stdDriverInfo, path, &exists) if err != nil { - err = hcserror.Errorf(err, title, "path=%s", path) - logrus.Error(err) - return false, err + return false, hcserror.New(err, title+" - failed", "") } - - logrus.Debugf(title+"succeeded path=%s exists=%d", path, exists) + fields["layer-exists"] = exists != 0 return exists != 0, nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go index a1b8b9882..6d0ae8a07 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go @@ -75,13 +75,13 @@ func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, for i := 0; i < len(parentLayerPaths); i++ { g, err := LayerID(parentLayerPaths[i]) if err != nil { - logrus.Debugf("Failed to convert name to guid %s", err) + logrus.WithError(err).Debug("Failed to convert name to guid") return nil, err } p, err := syscall.UTF16PtrFromString(parentLayerPaths[i]) if err != nil { - logrus.Debugf("Failed conversion of parentLayerPath to pointer %s", err) + logrus.WithError(err).Debug("Failed conversion of parentLayerPath to pointer") return nil, err } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go index 741994ba4..45a63cf65 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go @@ -10,15 +10,25 @@ import ( // Host Compute Service, ensuring GUIDs generated with the same string are common // across all clients. func NameToGuid(name string) (id guid.GUID, err error) { - title := "hcsshim::NameToGuid " + title := "hcsshim::NameToGuid" + fields := logrus.Fields{ + "name": name, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() err = nameToGuid(name, &id) if err != nil { - err = hcserror.Errorf(err, title, "name=%s", name) - logrus.Error(err) + err = hcserror.New(err, title+" - failed", "") return } - - logrus.Debugf(title+"name:%s guid:%s", name, id.String()) + fields["guid"] = id.String() return } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go index bd4005dc4..2b65b0186 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go @@ -14,9 +14,20 @@ var prepareLayerLock sync.Mutex // parent layers, and is necessary in order to view or interact with the layer // as an actual filesystem (reading and writing files, creating directories, etc). // Disabling the filter must be done via UnprepareLayer. 
-func PrepareLayer(path string, parentLayerPaths []string) error { - title := "hcsshim::PrepareLayer " - logrus.Debugf(title+"path %s", path) +func PrepareLayer(path string, parentLayerPaths []string) (err error) { + title := "hcsshim::PrepareLayer" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() // Generate layer descriptors layers, err := layerPathsToDescriptors(parentLayerPaths) @@ -30,11 +41,7 @@ func PrepareLayer(path string, parentLayerPaths []string) error { defer prepareLayerLock.Unlock() err = prepareLayer(&stdDriverInfo, path, layers) if err != nil { - err = hcserror.Errorf(err, title, "path=%s", path) - logrus.Error(err) - return err + return hcserror.New(err, title+" - failed", "") } - - logrus.Debugf(title+"succeeded path=%s", path) return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go index 5f1b4f4f4..bccd45969 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go @@ -7,17 +7,24 @@ import ( // UnprepareLayer disables the filesystem filter for the read-write layer with // the given id. -func UnprepareLayer(path string) error { - title := "hcsshim::UnprepareLayer " - logrus.Debugf(title+"path %s", path) +func UnprepareLayer(path string) (err error) { + title := "hcsshim::UnprepareLayer" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() - err := unprepareLayer(&stdDriverInfo, path) + err = unprepareLayer(&stdDriverInfo, path) if err != nil { - err = hcserror.Errorf(err, title, "path=%s", path) - logrus.Error(err) - return err + return hcserror.New(err, title+" - failed", "") } - - logrus.Debugf(title+"succeeded path=%s", path) return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go index 768a6f2f1..78f2aacd8 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go @@ -2,7 +2,7 @@ package wclayer import "github.com/Microsoft/hcsshim/internal/guid" -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go -winio wclayer.go +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go wclayer.go //sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer? //sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer? @@ -22,16 +22,6 @@ import "github.com/Microsoft/hcsshim/internal/guid" //sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage? //sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage? -//sys importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ImportLayerBegin? 
-//sys importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) = vmcompute.ImportLayerNext? -//sys importLayerWrite(context uintptr, buffer []byte) (hr error) = vmcompute.ImportLayerWrite? -//sys importLayerEnd(context uintptr) (hr error) = vmcompute.ImportLayerEnd? - -//sys exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ExportLayerBegin? -//sys exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) = vmcompute.ExportLayerNext? -//sys exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) = vmcompute.ExportLayerRead? -//sys exportLayerEnd(context uintptr) (hr error) = vmcompute.ExportLayerEnd? - //sys grantVmAccess(vmid string, filepath string) (hr error) = vmcompute.GrantVmAccess? type _guid = guid.GUID diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go index cb813aa3d..d853ab259 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go @@ -1,4 +1,4 @@ -// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT +// Code generated mksyscall_windows.exe DO NOT EDIT package wclayer @@ -6,8 +6,6 @@ import ( "syscall" "unsafe" - "github.com/Microsoft/go-winio" - "github.com/Microsoft/hcsshim/internal/interop" "golang.org/x/sys/windows" ) @@ -58,14 +56,6 @@ var ( procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer") procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage") procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage") - procImportLayerBegin = modvmcompute.NewProc("ImportLayerBegin") - procImportLayerNext = modvmcompute.NewProc("ImportLayerNext") - procImportLayerWrite = modvmcompute.NewProc("ImportLayerWrite") - procImportLayerEnd = modvmcompute.NewProc("ImportLayerEnd") - procExportLayerBegin = modvmcompute.NewProc("ExportLayerBegin") - procExportLayerNext = modvmcompute.NewProc("ExportLayerNext") - procExportLayerRead = modvmcompute.NewProc("ExportLayerRead") - procExportLayerEnd = modvmcompute.NewProc("ExportLayerEnd") procGrantVmAccess = modvmcompute.NewProc("GrantVmAccess") ) @@ -84,7 +74,10 @@ func _activateLayer(info *driverInfo, id *uint16) (hr error) { } r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -113,7 +106,10 @@ func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC } r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -138,7 +134,10 @@ func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) { } r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent))) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = 
syscall.Errno(r0) } return } @@ -162,7 +161,10 @@ func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descripto } r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -182,7 +184,10 @@ func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) { } r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -202,7 +207,10 @@ func _deactivateLayer(info *driverInfo, id *uint16) (hr error) { } r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -222,7 +230,10 @@ func _destroyLayer(info *driverInfo, id *uint16) (hr error) { } r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -251,7 +262,10 @@ func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_L } r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -271,7 +285,10 @@ func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *u } r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -282,7 +299,10 @@ func getBaseImages(buffer **uint16) (hr error) { } r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -311,7 +331,10 @@ func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_L } r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -331,7 +354,10 @@ func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) { } r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists))) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if 
r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -351,7 +377,10 @@ func _nameToGuid(name *uint16, guid *_guid) (hr error) { } r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -375,7 +404,10 @@ func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPT } r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -395,7 +427,10 @@ func _unprepareLayer(info *driverInfo, id *uint16) (hr error) { } r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -415,7 +450,10 @@ func _processBaseImage(path *uint16) (hr error) { } r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -435,138 +473,10 @@ func _processUtilityImage(path *uint16) (hr error) { } r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) - } - return -} - -func importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _importLayerBegin(info, _p0, descriptors, context) -} - -func _importLayerBegin(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) { - var _p1 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p1 = &descriptors[0] - } - if hr = procImportLayerBegin.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procImportLayerBegin.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), uintptr(unsafe.Pointer(context)), 0) - if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) - } - return -} - -func importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(fileName) - if hr != nil { - return - } - return _importLayerNext(context, _p0, fileInfo) -} - -func _importLayerNext(context uintptr, fileName *uint16, fileInfo *winio.FileBasicInfo) (hr error) { - if hr = procImportLayerNext.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procImportLayerNext.Addr(), 3, uintptr(context), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(fileInfo))) - if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) - } - return -} - -func importLayerWrite(context uintptr, buffer []byte) (hr error) { - var _p0 *byte - if len(buffer) > 0 { - _p0 = &buffer[0] - } - if hr = procImportLayerWrite.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procImportLayerWrite.Addr(), 3, uintptr(context), 
uintptr(unsafe.Pointer(_p0)), uintptr(len(buffer))) - if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) - } - return -} - -func importLayerEnd(context uintptr) (hr error) { - if hr = procImportLayerEnd.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procImportLayerEnd.Addr(), 1, uintptr(context), 0, 0) - if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) - } - return -} - -func exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _exportLayerBegin(info, _p0, descriptors, context) -} - -func _exportLayerBegin(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) { - var _p1 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p1 = &descriptors[0] - } - if hr = procExportLayerBegin.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procExportLayerBegin.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), uintptr(unsafe.Pointer(context)), 0) - if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) - } - return -} - -func exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) { - if hr = procExportLayerNext.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procExportLayerNext.Addr(), 5, uintptr(context), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(fileInfo)), uintptr(unsafe.Pointer(fileSize)), uintptr(unsafe.Pointer(deleted)), 0) - if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) - } - return -} - -func exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) { - var _p0 *byte - if len(buffer) > 0 { - _p0 = &buffer[0] - } - if hr = procExportLayerRead.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procExportLayerRead.Addr(), 4, uintptr(context), uintptr(unsafe.Pointer(_p0)), uintptr(len(buffer)), uintptr(unsafe.Pointer(bytesRead)), 0, 0) - if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) - } - return -} - -func exportLayerEnd(context uintptr) (hr error) { - if hr = procExportLayerEnd.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procExportLayerEnd.Addr(), 1, uintptr(context), 0, 0) - if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } @@ -591,7 +501,10 @@ func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) { } r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } diff --git a/vendor/github.com/Microsoft/hcsshim/layer.go b/vendor/github.com/Microsoft/hcsshim/layer.go index 8cdc247dc..df0e63bbd 100644 --- a/vendor/github.com/Microsoft/hcsshim/layer.go +++ b/vendor/github.com/Microsoft/hcsshim/layer.go @@ -5,7 +5,6 @@ import ( "path/filepath" "github.com/Microsoft/hcsshim/internal/guid" - "github.com/Microsoft/hcsshim/internal/wclayer" ) @@ -19,6 +18,7 @@ func ActivateLayer(info DriverInfo, id string) error { func CreateLayer(info DriverInfo, id, parent string) error { return wclayer.CreateLayer(layerPath(&info, id), parent) } + // New clients should use CreateScratchLayer instead. 
Kept in to preserve API compatibility. func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { return wclayer.CreateScratchLayer(layerPath(&info, layerId), parentLayerPaths) @@ -32,6 +32,7 @@ func DeactivateLayer(info DriverInfo, id string) error { func DestroyLayer(info DriverInfo, id string) error { return wclayer.DestroyLayer(layerPath(&info, id)) } + // New clients should use ExpandScratchSize instead. Kept in to preserve API compatibility. func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error { return wclayer.ExpandScratchSize(layerPath(&info, layerId), size) @@ -72,9 +73,6 @@ type DriverInfo struct { HomeDir string } -type FilterLayerReader = wclayer.FilterLayerReader -type FilterLayerWriter = wclayer.FilterLayerWriter - type GUID [16]byte func NameToGuid(name string) (id GUID, err error) { diff --git a/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go index 821979334..7647734de 100644 --- a/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go @@ -300,7 +300,10 @@ func (r *Rets) SetErrorCode() string { %s = %sErrno(r0) }` const hrCode = `if int32(r0) < 0 { - %s = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + %s = %sErrno(r0) }` if r.Name == "" && !r.ReturnsError { return "" @@ -310,7 +313,7 @@ func (r *Rets) SetErrorCode() string { } if r.Type == "error" { if r.Name == "hr" { - return fmt.Sprintf(hrCode, r.Name) + return fmt.Sprintf(hrCode, r.Name, syscalldot()) } else { return fmt.Sprintf(code, r.Name, syscalldot()) } @@ -773,7 +776,6 @@ func (src *Source) Generate(w io.Writer) error { src.ExternalImport("golang.org/x/sys/windows") } } - src.ExternalImport("github.com/Microsoft/hcsshim/internal/interop") if *winio { src.ExternalImport("github.com/Microsoft/go-winio") } @@ -788,6 +790,9 @@ func (src *Source) Generate(w io.Writer) error { if !*systemDLL { return syscalldot() + "NewLazyDLL(" + arg + ")" } + if strings.HasPrefix(dll, "api_") || strings.HasPrefix(dll, "ext_") { + arg = strings.Replace(arg, "_", "-", -1) + } switch pkgtype { case pkgStd: return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))" @@ -847,7 +852,7 @@ func main() { // TODO: use println instead to print in the following template const srcTemplate = ` -{{define "main"}}// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT +{{define "main"}}// Code generated mksyscall_windows.exe DO NOT EDIT package {{packagename}} diff --git a/vendor/github.com/Microsoft/hcsshim/vendor.conf b/vendor/github.com/Microsoft/hcsshim/vendor.conf new file mode 100644 index 000000000..6e0ed1566 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/vendor.conf @@ -0,0 +1,21 @@ +github.com/blang/semver v3.1.0 +github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23 +github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3 +github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 +github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f +github.com/konsorten/go-windows-terminal-sequences v1.0.1 +github.com/linuxkit/virtsock 8e79449dea0735c1c056d814934dd035734cc97c +github.com/Microsoft/go-winio 16cfc975803886a5e47c4257a24c8d8c52e178b2 +github.com/Microsoft/opengcs v0.3.9 +github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353 +github.com/opencontainers/runtime-tools 1d69bd0f9c39677d0630e50664fbc3154ae61b88 
+github.com/pkg/errors v0.8.1 +github.com/sirupsen/logrus v1.3.0 +github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16 +github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c +github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6 +github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b +github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874 +golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 +golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f +golang.org/x/sys e5ecc2a6747ce8d4af18ed98b3de5ae30eb3a5bb \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/version.go b/vendor/github.com/Microsoft/hcsshim/version.go deleted file mode 100644 index 9ebb257b3..000000000 --- a/vendor/github.com/Microsoft/hcsshim/version.go +++ /dev/null @@ -1,6 +0,0 @@ -package hcsshim - -// IsTP4 returns whether the currently running Windows build is at least TP4. -func IsTP4() bool { - return false -} diff --git a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go index cd471295b..8bed84857 100644 --- a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go @@ -1,4 +1,4 @@ -// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT +// Code generated mksyscall_windows.exe DO NOT EDIT package hcsshim @@ -6,7 +6,6 @@ import ( "syscall" "unsafe" - "github.com/Microsoft/hcsshim/internal/interop" "golang.org/x/sys/windows" ) @@ -46,7 +45,10 @@ var ( func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) if int32(r0) < 0 { - hr = interop.Win32FromHresult(r0) + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } diff --git a/vendor/github.com/Nvveen/Gotty/LICENSE b/vendor/github.com/Nvveen/Gotty/LICENSE deleted file mode 100644 index 0b71c9736..000000000 --- a/vendor/github.com/Nvveen/Gotty/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com) -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -The views and conclusions contained in the software and documentation are those -of the authors and should not be interpreted as representing official policies, -either expressed or implied, of the FreeBSD Project. diff --git a/vendor/github.com/Nvveen/Gotty/README b/vendor/github.com/Nvveen/Gotty/README deleted file mode 100644 index a6b0d9a8f..000000000 --- a/vendor/github.com/Nvveen/Gotty/README +++ /dev/null @@ -1,5 +0,0 @@ -Gotty is a library written in Go that determines and reads termcap database -files to produce an interface for interacting with the capabilities of a -terminal. -See the godoc documentation or the source code for more information about -function usage. diff --git a/vendor/github.com/Nvveen/Gotty/TODO b/vendor/github.com/Nvveen/Gotty/TODO deleted file mode 100644 index 470460531..000000000 --- a/vendor/github.com/Nvveen/Gotty/TODO +++ /dev/null @@ -1,3 +0,0 @@ -gotty.go:// TODO add more concurrency to name lookup, look for more opportunities. -all:// TODO add more documentation, with function usage in a doc.go file. -all:// TODO add more testing/benchmarking with go test. diff --git a/vendor/github.com/Nvveen/Gotty/attributes.go b/vendor/github.com/Nvveen/Gotty/attributes.go deleted file mode 100644 index a4c005fae..000000000 --- a/vendor/github.com/Nvveen/Gotty/attributes.go +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package gotty - -// Boolean capabilities -var BoolAttr = [...]string{ - "auto_left_margin", "bw", - "auto_right_margin", "am", - "no_esc_ctlc", "xsb", - "ceol_standout_glitch", "xhp", - "eat_newline_glitch", "xenl", - "erase_overstrike", "eo", - "generic_type", "gn", - "hard_copy", "hc", - "has_meta_key", "km", - "has_status_line", "hs", - "insert_null_glitch", "in", - "memory_above", "da", - "memory_below", "db", - "move_insert_mode", "mir", - "move_standout_mode", "msgr", - "over_strike", "os", - "status_line_esc_ok", "eslok", - "dest_tabs_magic_smso", "xt", - "tilde_glitch", "hz", - "transparent_underline", "ul", - "xon_xoff", "nxon", - "needs_xon_xoff", "nxon", - "prtr_silent", "mc5i", - "hard_cursor", "chts", - "non_rev_rmcup", "nrrmc", - "no_pad_char", "npc", - "non_dest_scroll_region", "ndscr", - "can_change", "ccc", - "back_color_erase", "bce", - "hue_lightness_saturation", "hls", - "col_addr_glitch", "xhpa", - "cr_cancels_micro_mode", "crxm", - "has_print_wheel", "daisy", - "row_addr_glitch", "xvpa", - "semi_auto_right_margin", "sam", - "cpi_changes_res", "cpix", - "lpi_changes_res", "lpix", - "backspaces_with_bs", "", - "crt_no_scrolling", "", - "no_correctly_working_cr", "", - "gnu_has_meta_key", "", - "linefeed_is_newline", "", - "has_hardware_tabs", "", - "return_does_clr_eol", "", -} - -// Numerical capabilities -var NumAttr = [...]string{ - "columns", "cols", - "init_tabs", "it", - "lines", "lines", - "lines_of_memory", "lm", - "magic_cookie_glitch", "xmc", - "padding_baud_rate", "pb", - "virtual_terminal", "vt", - "width_status_line", "wsl", - "num_labels", "nlab", - "label_height", "lh", - "label_width", "lw", - "max_attributes", "ma", - "maximum_windows", "wnum", - "max_colors", "colors", - "max_pairs", "pairs", - "no_color_video", "ncv", - "buffer_capacity", "bufsz", - "dot_vert_spacing", "spinv", - "dot_horz_spacing", "spinh", - "max_micro_address", "maddr", - "max_micro_jump", "mjump", - "micro_col_size", "mcs", - "micro_line_size", "mls", - "number_of_pins", "npins", - 
"output_res_char", "orc", - "output_res_line", "orl", - "output_res_horz_inch", "orhi", - "output_res_vert_inch", "orvi", - "print_rate", "cps", - "wide_char_size", "widcs", - "buttons", "btns", - "bit_image_entwining", "bitwin", - "bit_image_type", "bitype", - "magic_cookie_glitch_ul", "", - "carriage_return_delay", "", - "new_line_delay", "", - "backspace_delay", "", - "horizontal_tab_delay", "", - "number_of_function_keys", "", -} - -// String capabilities -var StrAttr = [...]string{ - "back_tab", "cbt", - "bell", "bel", - "carriage_return", "cr", - "change_scroll_region", "csr", - "clear_all_tabs", "tbc", - "clear_screen", "clear", - "clr_eol", "el", - "clr_eos", "ed", - "column_address", "hpa", - "command_character", "cmdch", - "cursor_address", "cup", - "cursor_down", "cud1", - "cursor_home", "home", - "cursor_invisible", "civis", - "cursor_left", "cub1", - "cursor_mem_address", "mrcup", - "cursor_normal", "cnorm", - "cursor_right", "cuf1", - "cursor_to_ll", "ll", - "cursor_up", "cuu1", - "cursor_visible", "cvvis", - "delete_character", "dch1", - "delete_line", "dl1", - "dis_status_line", "dsl", - "down_half_line", "hd", - "enter_alt_charset_mode", "smacs", - "enter_blink_mode", "blink", - "enter_bold_mode", "bold", - "enter_ca_mode", "smcup", - "enter_delete_mode", "smdc", - "enter_dim_mode", "dim", - "enter_insert_mode", "smir", - "enter_secure_mode", "invis", - "enter_protected_mode", "prot", - "enter_reverse_mode", "rev", - "enter_standout_mode", "smso", - "enter_underline_mode", "smul", - "erase_chars", "ech", - "exit_alt_charset_mode", "rmacs", - "exit_attribute_mode", "sgr0", - "exit_ca_mode", "rmcup", - "exit_delete_mode", "rmdc", - "exit_insert_mode", "rmir", - "exit_standout_mode", "rmso", - "exit_underline_mode", "rmul", - "flash_screen", "flash", - "form_feed", "ff", - "from_status_line", "fsl", - "init_1string", "is1", - "init_2string", "is2", - "init_3string", "is3", - "init_file", "if", - "insert_character", "ich1", - "insert_line", "il1", - "insert_padding", "ip", - "key_backspace", "kbs", - "key_catab", "ktbc", - "key_clear", "kclr", - "key_ctab", "kctab", - "key_dc", "kdch1", - "key_dl", "kdl1", - "key_down", "kcud1", - "key_eic", "krmir", - "key_eol", "kel", - "key_eos", "ked", - "key_f0", "kf0", - "key_f1", "kf1", - "key_f10", "kf10", - "key_f2", "kf2", - "key_f3", "kf3", - "key_f4", "kf4", - "key_f5", "kf5", - "key_f6", "kf6", - "key_f7", "kf7", - "key_f8", "kf8", - "key_f9", "kf9", - "key_home", "khome", - "key_ic", "kich1", - "key_il", "kil1", - "key_left", "kcub1", - "key_ll", "kll", - "key_npage", "knp", - "key_ppage", "kpp", - "key_right", "kcuf1", - "key_sf", "kind", - "key_sr", "kri", - "key_stab", "khts", - "key_up", "kcuu1", - "keypad_local", "rmkx", - "keypad_xmit", "smkx", - "lab_f0", "lf0", - "lab_f1", "lf1", - "lab_f10", "lf10", - "lab_f2", "lf2", - "lab_f3", "lf3", - "lab_f4", "lf4", - "lab_f5", "lf5", - "lab_f6", "lf6", - "lab_f7", "lf7", - "lab_f8", "lf8", - "lab_f9", "lf9", - "meta_off", "rmm", - "meta_on", "smm", - "newline", "_glitch", - "pad_char", "npc", - "parm_dch", "dch", - "parm_delete_line", "dl", - "parm_down_cursor", "cud", - "parm_ich", "ich", - "parm_index", "indn", - "parm_insert_line", "il", - "parm_left_cursor", "cub", - "parm_right_cursor", "cuf", - "parm_rindex", "rin", - "parm_up_cursor", "cuu", - "pkey_key", "pfkey", - "pkey_local", "pfloc", - "pkey_xmit", "pfx", - "print_screen", "mc0", - "prtr_off", "mc4", - "prtr_on", "mc5", - "repeat_char", "rep", - "reset_1string", "rs1", - "reset_2string", "rs2", - "reset_3string", 
"rs3", - "reset_file", "rf", - "restore_cursor", "rc", - "row_address", "mvpa", - "save_cursor", "row_address", - "scroll_forward", "ind", - "scroll_reverse", "ri", - "set_attributes", "sgr", - "set_tab", "hts", - "set_window", "wind", - "tab", "s_magic_smso", - "to_status_line", "tsl", - "underline_char", "uc", - "up_half_line", "hu", - "init_prog", "iprog", - "key_a1", "ka1", - "key_a3", "ka3", - "key_b2", "kb2", - "key_c1", "kc1", - "key_c3", "kc3", - "prtr_non", "mc5p", - "char_padding", "rmp", - "acs_chars", "acsc", - "plab_norm", "pln", - "key_btab", "kcbt", - "enter_xon_mode", "smxon", - "exit_xon_mode", "rmxon", - "enter_am_mode", "smam", - "exit_am_mode", "rmam", - "xon_character", "xonc", - "xoff_character", "xoffc", - "ena_acs", "enacs", - "label_on", "smln", - "label_off", "rmln", - "key_beg", "kbeg", - "key_cancel", "kcan", - "key_close", "kclo", - "key_command", "kcmd", - "key_copy", "kcpy", - "key_create", "kcrt", - "key_end", "kend", - "key_enter", "kent", - "key_exit", "kext", - "key_find", "kfnd", - "key_help", "khlp", - "key_mark", "kmrk", - "key_message", "kmsg", - "key_move", "kmov", - "key_next", "knxt", - "key_open", "kopn", - "key_options", "kopt", - "key_previous", "kprv", - "key_print", "kprt", - "key_redo", "krdo", - "key_reference", "kref", - "key_refresh", "krfr", - "key_replace", "krpl", - "key_restart", "krst", - "key_resume", "kres", - "key_save", "ksav", - "key_suspend", "kspd", - "key_undo", "kund", - "key_sbeg", "kBEG", - "key_scancel", "kCAN", - "key_scommand", "kCMD", - "key_scopy", "kCPY", - "key_screate", "kCRT", - "key_sdc", "kDC", - "key_sdl", "kDL", - "key_select", "kslt", - "key_send", "kEND", - "key_seol", "kEOL", - "key_sexit", "kEXT", - "key_sfind", "kFND", - "key_shelp", "kHLP", - "key_shome", "kHOM", - "key_sic", "kIC", - "key_sleft", "kLFT", - "key_smessage", "kMSG", - "key_smove", "kMOV", - "key_snext", "kNXT", - "key_soptions", "kOPT", - "key_sprevious", "kPRV", - "key_sprint", "kPRT", - "key_sredo", "kRDO", - "key_sreplace", "kRPL", - "key_sright", "kRIT", - "key_srsume", "kRES", - "key_ssave", "kSAV", - "key_ssuspend", "kSPD", - "key_sundo", "kUND", - "req_for_input", "rfi", - "key_f11", "kf11", - "key_f12", "kf12", - "key_f13", "kf13", - "key_f14", "kf14", - "key_f15", "kf15", - "key_f16", "kf16", - "key_f17", "kf17", - "key_f18", "kf18", - "key_f19", "kf19", - "key_f20", "kf20", - "key_f21", "kf21", - "key_f22", "kf22", - "key_f23", "kf23", - "key_f24", "kf24", - "key_f25", "kf25", - "key_f26", "kf26", - "key_f27", "kf27", - "key_f28", "kf28", - "key_f29", "kf29", - "key_f30", "kf30", - "key_f31", "kf31", - "key_f32", "kf32", - "key_f33", "kf33", - "key_f34", "kf34", - "key_f35", "kf35", - "key_f36", "kf36", - "key_f37", "kf37", - "key_f38", "kf38", - "key_f39", "kf39", - "key_f40", "kf40", - "key_f41", "kf41", - "key_f42", "kf42", - "key_f43", "kf43", - "key_f44", "kf44", - "key_f45", "kf45", - "key_f46", "kf46", - "key_f47", "kf47", - "key_f48", "kf48", - "key_f49", "kf49", - "key_f50", "kf50", - "key_f51", "kf51", - "key_f52", "kf52", - "key_f53", "kf53", - "key_f54", "kf54", - "key_f55", "kf55", - "key_f56", "kf56", - "key_f57", "kf57", - "key_f58", "kf58", - "key_f59", "kf59", - "key_f60", "kf60", - "key_f61", "kf61", - "key_f62", "kf62", - "key_f63", "kf63", - "clr_bol", "el1", - "clear_margins", "mgc", - "set_left_margin", "smgl", - "set_right_margin", "smgr", - "label_format", "fln", - "set_clock", "sclk", - "display_clock", "dclk", - "remove_clock", "rmclk", - "create_window", "cwin", - "goto_window", "wingo", - "hangup", 
"hup", - "dial_phone", "dial", - "quick_dial", "qdial", - "tone", "tone", - "pulse", "pulse", - "flash_hook", "hook", - "fixed_pause", "pause", - "wait_tone", "wait", - "user0", "u0", - "user1", "u1", - "user2", "u2", - "user3", "u3", - "user4", "u4", - "user5", "u5", - "user6", "u6", - "user7", "u7", - "user8", "u8", - "user9", "u9", - "orig_pair", "op", - "orig_colors", "oc", - "initialize_color", "initc", - "initialize_pair", "initp", - "set_color_pair", "scp", - "set_foreground", "setf", - "set_background", "setb", - "change_char_pitch", "cpi", - "change_line_pitch", "lpi", - "change_res_horz", "chr", - "change_res_vert", "cvr", - "define_char", "defc", - "enter_doublewide_mode", "swidm", - "enter_draft_quality", "sdrfq", - "enter_italics_mode", "sitm", - "enter_leftward_mode", "slm", - "enter_micro_mode", "smicm", - "enter_near_letter_quality", "snlq", - "enter_normal_quality", "snrmq", - "enter_shadow_mode", "sshm", - "enter_subscript_mode", "ssubm", - "enter_superscript_mode", "ssupm", - "enter_upward_mode", "sum", - "exit_doublewide_mode", "rwidm", - "exit_italics_mode", "ritm", - "exit_leftward_mode", "rlm", - "exit_micro_mode", "rmicm", - "exit_shadow_mode", "rshm", - "exit_subscript_mode", "rsubm", - "exit_superscript_mode", "rsupm", - "exit_upward_mode", "rum", - "micro_column_address", "mhpa", - "micro_down", "mcud1", - "micro_left", "mcub1", - "micro_right", "mcuf1", - "micro_row_address", "mvpa", - "micro_up", "mcuu1", - "order_of_pins", "porder", - "parm_down_micro", "mcud", - "parm_left_micro", "mcub", - "parm_right_micro", "mcuf", - "parm_up_micro", "mcuu", - "select_char_set", "scs", - "set_bottom_margin", "smgb", - "set_bottom_margin_parm", "smgbp", - "set_left_margin_parm", "smglp", - "set_right_margin_parm", "smgrp", - "set_top_margin", "smgt", - "set_top_margin_parm", "smgtp", - "start_bit_image", "sbim", - "start_char_set_def", "scsd", - "stop_bit_image", "rbim", - "stop_char_set_def", "rcsd", - "subscript_characters", "subcs", - "superscript_characters", "supcs", - "these_cause_cr", "docr", - "zero_motion", "zerom", - "char_set_names", "csnm", - "key_mouse", "kmous", - "mouse_info", "minfo", - "req_mouse_pos", "reqmp", - "get_mouse", "getm", - "set_a_foreground", "setaf", - "set_a_background", "setab", - "pkey_plab", "pfxl", - "device_type", "devt", - "code_set_init", "csin", - "set0_des_seq", "s0ds", - "set1_des_seq", "s1ds", - "set2_des_seq", "s2ds", - "set3_des_seq", "s3ds", - "set_lr_margin", "smglr", - "set_tb_margin", "smgtb", - "bit_image_repeat", "birep", - "bit_image_newline", "binel", - "bit_image_carriage_return", "bicr", - "color_names", "colornm", - "define_bit_image_region", "defbi", - "end_bit_image_region", "endbi", - "set_color_band", "setcolor", - "set_page_length", "slines", - "display_pc_char", "dispc", - "enter_pc_charset_mode", "smpch", - "exit_pc_charset_mode", "rmpch", - "enter_scancode_mode", "smsc", - "exit_scancode_mode", "rmsc", - "pc_term_options", "pctrm", - "scancode_escape", "scesc", - "alt_scancode_esc", "scesa", - "enter_horizontal_hl_mode", "ehhlm", - "enter_left_hl_mode", "elhlm", - "enter_low_hl_mode", "elohlm", - "enter_right_hl_mode", "erhlm", - "enter_top_hl_mode", "ethlm", - "enter_vertical_hl_mode", "evhlm", - "set_a_attributes", "sgr1", - "set_pglen_inch", "slength", - "termcap_init2", "", - "termcap_reset", "", - "linefeed_if_not_lf", "", - "backspace_if_not_bs", "", - "other_non_function_keys", "", - "arrow_key_map", "", - "acs_ulcorner", "", - "acs_llcorner", "", - "acs_urcorner", "", - "acs_lrcorner", "", - "acs_ltee", 
"", - "acs_rtee", "", - "acs_btee", "", - "acs_ttee", "", - "acs_hline", "", - "acs_vline", "", - "acs_plus", "", - "memory_lock", "", - "memory_unlock", "", - "box_chars_1", "", -} diff --git a/vendor/github.com/Nvveen/Gotty/gotty.go b/vendor/github.com/Nvveen/Gotty/gotty.go deleted file mode 100644 index 093cbf37e..000000000 --- a/vendor/github.com/Nvveen/Gotty/gotty.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Gotty is a Go-package for reading and parsing the terminfo database -package gotty - -// TODO add more concurrency to name lookup, look for more opportunities. - -import ( - "encoding/binary" - "errors" - "fmt" - "os" - "reflect" - "strings" - "sync" -) - -// Open a terminfo file by the name given and construct a TermInfo object. -// If something went wrong reading the terminfo database file, an error is -// returned. -func OpenTermInfo(termName string) (*TermInfo, error) { - var term *TermInfo - var err error - // Find the environment variables - termloc := os.Getenv("TERMINFO") - if len(termloc) == 0 { - // Search like ncurses - locations := []string{os.Getenv("HOME") + "/.terminfo/", "/etc/terminfo/", - "/lib/terminfo/", "/usr/share/terminfo/"} - var path string - for _, str := range locations { - // Construct path - path = str + string(termName[0]) + "/" + termName - // Check if path can be opened - file, _ := os.Open(path) - if file != nil { - // Path can open, fall out and use current path - file.Close() - break - } - } - if len(path) > 0 { - term, err = readTermInfo(path) - } else { - err = errors.New(fmt.Sprintf("No terminfo file(-location) found")) - } - } - return term, err -} - -// Open a terminfo file from the environment variable containing the current -// terminal name and construct a TermInfo object. If something went wrong -// reading the terminfo database file, an error is returned. -func OpenTermInfoEnv() (*TermInfo, error) { - termenv := os.Getenv("TERM") - return OpenTermInfo(termenv) -} - -// Return an attribute by the name attr provided. If none can be found, -// an error is returned. -func (term *TermInfo) GetAttribute(attr string) (stacker, error) { - // Channel to store the main value in. - var value stacker - // Add a blocking WaitGroup - var block sync.WaitGroup - // Keep track of variable being written. - written := false - // Function to put into goroutine. - f := func(ats interface{}) { - var ok bool - var v stacker - // Switch on type of map to use and assign value to it. - switch reflect.TypeOf(ats).Elem().Kind() { - case reflect.Bool: - v, ok = ats.(map[string]bool)[attr] - case reflect.Int16: - v, ok = ats.(map[string]int16)[attr] - case reflect.String: - v, ok = ats.(map[string]string)[attr] - } - // If ok, a value is found, so we can write. - if ok { - value = v - written = true - } - // Goroutine is done - block.Done() - } - block.Add(3) - // Go for all 3 attribute lists. - go f(term.boolAttributes) - go f(term.numAttributes) - go f(term.strAttributes) - // Wait until every goroutine is done. - block.Wait() - // If a value has been written, return it. - if written { - return value, nil - } - // Otherwise, error. - return nil, fmt.Errorf("Erorr finding attribute") -} - -// Return an attribute by the name attr provided. If none can be found, -// an error is returned. A name is first converted to its termcap value. 
-func (term *TermInfo) GetAttributeName(name string) (stacker, error) { - tc := GetTermcapName(name) - return term.GetAttribute(tc) -} - -// A utility function that finds and returns the termcap equivalent of a -// variable name. -func GetTermcapName(name string) string { - // Termcap name - var tc string - // Blocking group - var wait sync.WaitGroup - // Function to put into a goroutine - f := func(attrs []string) { - // Find the string corresponding to the name - for i, s := range attrs { - if s == name { - tc = attrs[i+1] - } - } - // Goroutine is finished - wait.Done() - } - wait.Add(3) - // Go for all 3 attribute lists - go f(BoolAttr[:]) - go f(NumAttr[:]) - go f(StrAttr[:]) - // Wait until every goroutine is done - wait.Wait() - // Return the termcap name - return tc -} - -// This function takes a path to a terminfo file and reads it in binary -// form to construct the actual TermInfo file. -func readTermInfo(path string) (*TermInfo, error) { - // Open the terminfo file - file, err := os.Open(path) - defer file.Close() - if err != nil { - return nil, err - } - - // magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize - // Header is composed of the magic 0432 octal number, size of the name - // section, size of the boolean section, the amount of number values, - // the number of offsets of strings, and the size of the string section. - var header [6]int16 - // Byte array is used to read in byte values - var byteArray []byte - // Short array is used to read in short values - var shArray []int16 - // TermInfo object to store values - var term TermInfo - - // Read in the header - err = binary.Read(file, binary.LittleEndian, &header) - if err != nil { - return nil, err - } - // If magic number isn't there or isn't correct, we have the wrong filetype - if header[0] != 0432 { - return nil, errors.New(fmt.Sprintf("Wrong filetype")) - } - - // Read in the names - byteArray = make([]byte, header[1]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.Names = strings.Split(string(byteArray), "|") - - // Read in the booleans - byteArray = make([]byte, header[2]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.boolAttributes = make(map[string]bool) - for i, b := range byteArray { - if b == 1 { - term.boolAttributes[BoolAttr[i*2+1]] = true - } - } - // If the number of bytes read is not even, a byte for alignment is added - if len(byteArray)%2 != 0 { - err = binary.Read(file, binary.LittleEndian, make([]byte, 1)) - if err != nil { - return nil, err - } - } - - // Read in shorts - shArray = make([]int16, header[3]) - err = binary.Read(file, binary.LittleEndian, &shArray) - if err != nil { - return nil, err - } - term.numAttributes = make(map[string]int16) - for i, n := range shArray { - if n != 0377 && n > -1 { - term.numAttributes[NumAttr[i*2+1]] = n - } - } - - // Read the offsets into the short array - shArray = make([]int16, header[4]) - err = binary.Read(file, binary.LittleEndian, &shArray) - if err != nil { - return nil, err - } - // Read the actual strings in the byte array - byteArray = make([]byte, header[5]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.strAttributes = make(map[string]string) - // We get an offset, and then iterate until the string is null-terminated - for i, offset := range shArray { - if offset > -1 { - r := offset - for ; byteArray[r] != 0; r++ { - } - term.strAttributes[StrAttr[i*2+1]] = 
string(byteArray[offset:r]) - } - } - return &term, nil -} diff --git a/vendor/github.com/Nvveen/Gotty/parser.go b/vendor/github.com/Nvveen/Gotty/parser.go deleted file mode 100644 index a9d5d23c5..000000000 --- a/vendor/github.com/Nvveen/Gotty/parser.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package gotty - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -var exp = [...]string{ - "%%", - "%c", - "%s", - "%p(\\d)", - "%P([A-z])", - "%g([A-z])", - "%'(.)'", - "%{([0-9]+)}", - "%l", - "%\\+|%-|%\\*|%/|%m", - "%&|%\\||%\\^", - "%=|%>|%<", - "%A|%O", - "%!|%~", - "%i", - "%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]", - "%\\?(.*?);", -} - -var regex *regexp.Regexp -var staticVar map[byte]stacker - -// Parses the attribute that is received with name attr and parameters params. -func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) { - // Get the attribute name first. - iface, err := term.GetAttribute(attr) - str, ok := iface.(string) - if err != nil { - return "", err - } - if !ok { - return str, errors.New("Only string capabilities can be parsed.") - } - // Construct the hidden parser struct so we can use a recursive stack based - // parser. - ps := &parser{} - // Dynamic variables only exist in this context. - ps.dynamicVar = make(map[byte]stacker, 26) - ps.parameters = make([]stacker, len(params)) - // Convert the parameters to insert them into the parser struct. - for i, x := range params { - ps.parameters[i] = x - } - // Recursively walk and return. - result, err := ps.walk(str) - return result, err -} - -// Parses the attribute that is received with name attr and parameters params. -// Only works on full name of a capability that is given, which it uses to -// search for the termcap name. -func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) { - tc := GetTermcapName(attr) - return term.Parse(tc, params) -} - -// Identify each token in a stack based manner and do the actual parsing. -func (ps *parser) walk(attr string) (string, error) { - // We use a buffer to get the modified string. - var buf bytes.Buffer - // Next, find and identify all tokens by their indices and strings. - tokens := regex.FindAllStringSubmatch(attr, -1) - if len(tokens) == 0 { - return attr, nil - } - indices := regex.FindAllStringIndex(attr, -1) - q := 0 // q counts the matches of one token - // Iterate through the string per character. - for i := 0; i < len(attr); i++ { - // If the current position is an identified token, execute the following - // steps. - if q < len(indices) && i >= indices[q][0] && i < indices[q][1] { - // Switch on token. - switch { - case tokens[q][0][:2] == "%%": - // Literal percentage character. - buf.WriteByte('%') - case tokens[q][0][:2] == "%c": - // Pop a character. - c, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - buf.WriteByte(c.(byte)) - case tokens[q][0][:2] == "%s": - // Pop a string. - str, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - if _, ok := str.(string); !ok { - return buf.String(), errors.New("Stack head is not a string") - } - buf.WriteString(str.(string)) - case tokens[q][0][:2] == "%p": - // Push a parameter on the stack. 
- index, err := strconv.ParseInt(tokens[q][1], 10, 8) - index-- - if err != nil { - return buf.String(), err - } - if int(index) >= len(ps.parameters) { - return buf.String(), errors.New("Parameters index out of bound") - } - ps.st.push(ps.parameters[index]) - case tokens[q][0][:2] == "%P": - // Pop a variable from the stack as a dynamic or static variable. - val, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - index := tokens[q][2] - if len(index) > 1 { - errorStr := fmt.Sprintf("%s is not a valid dynamic variables index", - index) - return buf.String(), errors.New(errorStr) - } - // Specify either dynamic or static. - if index[0] >= 'a' && index[0] <= 'z' { - ps.dynamicVar[index[0]] = val - } else if index[0] >= 'A' && index[0] <= 'Z' { - staticVar[index[0]] = val - } - case tokens[q][0][:2] == "%g": - // Push a variable from the stack as a dynamic or static variable. - index := tokens[q][3] - if len(index) > 1 { - errorStr := fmt.Sprintf("%s is not a valid static variables index", - index) - return buf.String(), errors.New(errorStr) - } - var val stacker - if index[0] >= 'a' && index[0] <= 'z' { - val = ps.dynamicVar[index[0]] - } else if index[0] >= 'A' && index[0] <= 'Z' { - val = staticVar[index[0]] - } - ps.st.push(val) - case tokens[q][0][:2] == "%'": - // Push a character constant. - con := tokens[q][4] - if len(con) > 1 { - errorStr := fmt.Sprintf("%s is not a valid character constant", con) - return buf.String(), errors.New(errorStr) - } - ps.st.push(con[0]) - case tokens[q][0][:2] == "%{": - // Push an integer constant. - con, err := strconv.ParseInt(tokens[q][5], 10, 32) - if err != nil { - return buf.String(), err - } - ps.st.push(con) - case tokens[q][0][:2] == "%l": - // Push the length of the string that is popped from the stack. - popStr, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - if _, ok := popStr.(string); !ok { - errStr := fmt.Sprintf("Stack head is not a string") - return buf.String(), errors.New(errStr) - } - ps.st.push(len(popStr.(string))) - case tokens[q][0][:2] == "%?": - // If-then-else construct. First, the whole string is identified and - // then inside this substring, we can specify which parts to switch on. - ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);") - ifTokens := ifReg.FindStringSubmatch(tokens[q][0]) - var ( - ifStr string - err error - ) - // Parse the if-part to determine if-else. - if len(ifTokens[1]) > 0 { - ifStr, err = ps.walk(ifTokens[1]) - } else { // else - ifStr, err = ps.walk(ifTokens[4]) - } - // Return any errors - if err != nil { - return buf.String(), err - } else if len(ifStr) > 0 { - // Self-defined limitation, not sure if this is correct, but didn't - // seem like it. - return buf.String(), errors.New("If-clause cannot print statements") - } - var thenStr string - // Pop the first value that is set by parsing the if-clause. - choose, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - // Switch to if or else. - if choose.(int) == 0 && len(ifTokens[1]) > 0 { - thenStr, err = ps.walk(ifTokens[3]) - } else if choose.(int) != 0 { - if len(ifTokens[1]) > 0 { - thenStr, err = ps.walk(ifTokens[2]) - } else { - thenStr, err = ps.walk(ifTokens[5]) - } - } - if err != nil { - return buf.String(), err - } - buf.WriteString(thenStr) - case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits. 
- fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'x': - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'X': - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 's': - token := tokens[q][0] - // Remove the : that comes before a flag. - if token[1] == ':' { - token = token[:1] + token[2:] - } - digit, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - // The rest is determined like the normal formatted prints. - digitStr := fmt.Sprintf(token, digit.(int)) - buf.WriteString(digitStr) - case tokens[q][0][:2] == "%i": - // Increment the parameters by one. - if len(ps.parameters) < 2 { - return buf.String(), errors.New("Not enough parameters to increment.") - } - val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int) - val1++ - val2++ - ps.parameters[0], ps.parameters[1] = val1, val2 - default: - // The rest of the tokens is a special case, where two values are - // popped and then operated on by the token that comes after them. - op1, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - op2, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - var result stacker - switch tokens[q][0][:2] { - case "%+": - // Addition - result = op2.(int) + op1.(int) - case "%-": - // Subtraction - result = op2.(int) - op1.(int) - case "%*": - // Multiplication - result = op2.(int) * op1.(int) - case "%/": - // Division - result = op2.(int) / op1.(int) - case "%m": - // Modulo - result = op2.(int) % op1.(int) - case "%&": - // Bitwise AND - result = op2.(int) & op1.(int) - case "%|": - // Bitwise OR - result = op2.(int) | op1.(int) - case "%^": - // Bitwise XOR - result = op2.(int) ^ op1.(int) - case "%=": - // Equals - result = op2 == op1 - case "%>": - // Greater-than - result = op2.(int) > op1.(int) - case "%<": - // Lesser-than - result = op2.(int) < op1.(int) - case "%A": - // Logical AND - result = op2.(bool) && op1.(bool) - case "%O": - // Logical OR - result = op2.(bool) || op1.(bool) - case "%!": - // Logical complement - result = !op1.(bool) - case "%~": - // Bitwise complement - result = ^(op1.(int)) - } - ps.st.push(result) - } - - i = indices[q][1] - 1 - q++ - } else { - // We are not "inside" a token, so just skip until the end or the next - // token, and add all characters to the buffer. - j := i - if q != len(indices) { - for !(j >= indices[q][0] && j < indices[q][1]) { - j++ - } - } else { - j = len(attr) - } - buf.WriteString(string(attr[i:j])) - i = j - } - } - // Return the buffer as a string. - return buf.String(), nil -} - -// Push a stacker-value onto the stack. -func (st *stack) push(s stacker) { - *st = append(*st, s) -} - -// Pop a stacker-value from the stack. -func (st *stack) pop() (stacker, error) { - if len(*st) == 0 { - return nil, errors.New("Stack is empty.") - } - newStack := make(stack, len(*st)-1) - val := (*st)[len(*st)-1] - copy(newStack, (*st)[:len(*st)-1]) - *st = newStack - return val, nil -} - -// Initialize regexes and the static vars (that don't get changed between -// calls. -func init() { - // Initialize the main regex. - expStr := strings.Join(exp[:], "|") - regex, _ = regexp.Compile(expStr) - // Initialize the static variables. - staticVar = make(map[byte]stacker, 26) -} diff --git a/vendor/github.com/Nvveen/Gotty/types.go b/vendor/github.com/Nvveen/Gotty/types.go deleted file mode 100644 index 9bcc65e9b..000000000 --- a/vendor/github.com/Nvveen/Gotty/types.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. 
-// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package gotty - -type TermInfo struct { - boolAttributes map[string]bool - numAttributes map[string]int16 - strAttributes map[string]string - // The various names of the TermInfo file. - Names []string -} - -type stacker interface { -} -type stack []stacker - -type parser struct { - st stack - parameters []stacker - dynamicVar map[byte]stacker -} diff --git a/vendor/github.com/checkpoint-restore/go-criu/LICENSE b/vendor/github.com/checkpoint-restore/go-criu/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/checkpoint-restore/go-criu/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/checkpoint-restore/go-criu/rpc/rpc.pb.go b/vendor/github.com/checkpoint-restore/go-criu/rpc/rpc.pb.go new file mode 100644 index 000000000..230faace5 --- /dev/null +++ b/vendor/github.com/checkpoint-restore/go-criu/rpc/rpc.pb.go @@ -0,0 +1,1211 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: rpc/rpc.proto + +/* +Package rpc is a generated protocol buffer package. + +It is generated from these files: + rpc/rpc.proto + +It has these top-level messages: + CriuPageServerInfo + CriuVethPair + ExtMountMap + JoinNamespace + InheritFd + CgroupRoot + UnixSk + CriuOpts + CriuDumpResp + CriuRestoreResp + CriuNotify + CriuFeatures + CriuReq + CriuResp + CriuVersion +*/ +package rpc + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type CriuCgMode int32 + +const ( + CriuCgMode_IGNORE CriuCgMode = 0 + CriuCgMode_CG_NONE CriuCgMode = 1 + CriuCgMode_PROPS CriuCgMode = 2 + CriuCgMode_SOFT CriuCgMode = 3 + CriuCgMode_FULL CriuCgMode = 4 + CriuCgMode_STRICT CriuCgMode = 5 + CriuCgMode_DEFAULT CriuCgMode = 6 +) + +var CriuCgMode_name = map[int32]string{ + 0: "IGNORE", + 1: "CG_NONE", + 2: "PROPS", + 3: "SOFT", + 4: "FULL", + 5: "STRICT", + 6: "DEFAULT", +} +var CriuCgMode_value = map[string]int32{ + "IGNORE": 0, + "CG_NONE": 1, + "PROPS": 2, + "SOFT": 3, + "FULL": 4, + "STRICT": 5, + "DEFAULT": 6, +} + +func (x CriuCgMode) Enum() *CriuCgMode { + p := new(CriuCgMode) + *p = x + return p +} +func (x CriuCgMode) String() string { + return proto.EnumName(CriuCgMode_name, int32(x)) +} +func (x *CriuCgMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CriuCgMode_value, data, "CriuCgMode") + if err != nil { + return err + } + *x = CriuCgMode(value) + return nil +} +func (CriuCgMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type CriuReqType int32 + +const ( + CriuReqType_EMPTY CriuReqType = 0 + CriuReqType_DUMP CriuReqType = 1 + CriuReqType_RESTORE CriuReqType = 2 + CriuReqType_CHECK CriuReqType = 3 + CriuReqType_PRE_DUMP CriuReqType = 4 + CriuReqType_PAGE_SERVER CriuReqType = 5 + CriuReqType_NOTIFY CriuReqType = 6 + CriuReqType_CPUINFO_DUMP CriuReqType = 7 + CriuReqType_CPUINFO_CHECK CriuReqType = 8 + CriuReqType_FEATURE_CHECK CriuReqType = 9 + CriuReqType_VERSION CriuReqType = 10 + CriuReqType_WAIT_PID CriuReqType = 11 + CriuReqType_PAGE_SERVER_CHLD CriuReqType = 12 +) + +var CriuReqType_name = map[int32]string{ + 0: "EMPTY", + 1: "DUMP", + 2: "RESTORE", + 3: "CHECK", + 4: "PRE_DUMP", + 5: "PAGE_SERVER", + 6: "NOTIFY", + 7: "CPUINFO_DUMP", + 8: "CPUINFO_CHECK", + 9: "FEATURE_CHECK", + 10: "VERSION", + 11: "WAIT_PID", + 12: "PAGE_SERVER_CHLD", +} +var CriuReqType_value = map[string]int32{ + "EMPTY": 0, + "DUMP": 1, + "RESTORE": 2, + "CHECK": 3, + "PRE_DUMP": 4, + "PAGE_SERVER": 5, + "NOTIFY": 6, + "CPUINFO_DUMP": 7, + "CPUINFO_CHECK": 8, + "FEATURE_CHECK": 9, + "VERSION": 10, + "WAIT_PID": 11, + "PAGE_SERVER_CHLD": 12, +} + +func (x CriuReqType) Enum() *CriuReqType { + p := new(CriuReqType) + *p = x + return p +} +func (x CriuReqType) String() string { + return proto.EnumName(CriuReqType_name, int32(x)) +} +func (x *CriuReqType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CriuReqType_value, data, "CriuReqType") + if err != nil { + return err + } + *x = CriuReqType(value) + return nil +} +func (CriuReqType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type CriuPageServerInfo struct { + Address *string `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + Pid *int32 `protobuf:"varint,3,opt,name=pid" json:"pid,omitempty"` + Fd *int32 `protobuf:"varint,4,opt,name=fd" json:"fd,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CriuPageServerInfo) Reset() { *m = CriuPageServerInfo{} } +func (m *CriuPageServerInfo) String() string { return proto.CompactTextString(m) } +func (*CriuPageServerInfo) ProtoMessage() {} +func (*CriuPageServerInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *CriuPageServerInfo) GetAddress() string { + if m != nil && m.Address != nil { + return *m.Address + } + return "" +} + +func (m 
*CriuPageServerInfo) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return 0 +} + +func (m *CriuPageServerInfo) GetPid() int32 { + if m != nil && m.Pid != nil { + return *m.Pid + } + return 0 +} + +func (m *CriuPageServerInfo) GetFd() int32 { + if m != nil && m.Fd != nil { + return *m.Fd + } + return 0 +} + +type CriuVethPair struct { + IfIn *string `protobuf:"bytes,1,req,name=if_in,json=ifIn" json:"if_in,omitempty"` + IfOut *string `protobuf:"bytes,2,req,name=if_out,json=ifOut" json:"if_out,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CriuVethPair) Reset() { *m = CriuVethPair{} } +func (m *CriuVethPair) String() string { return proto.CompactTextString(m) } +func (*CriuVethPair) ProtoMessage() {} +func (*CriuVethPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *CriuVethPair) GetIfIn() string { + if m != nil && m.IfIn != nil { + return *m.IfIn + } + return "" +} + +func (m *CriuVethPair) GetIfOut() string { + if m != nil && m.IfOut != nil { + return *m.IfOut + } + return "" +} + +type ExtMountMap struct { + Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` + Val *string `protobuf:"bytes,2,req,name=val" json:"val,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExtMountMap) Reset() { *m = ExtMountMap{} } +func (m *ExtMountMap) String() string { return proto.CompactTextString(m) } +func (*ExtMountMap) ProtoMessage() {} +func (*ExtMountMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ExtMountMap) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *ExtMountMap) GetVal() string { + if m != nil && m.Val != nil { + return *m.Val + } + return "" +} + +type JoinNamespace struct { + Ns *string `protobuf:"bytes,1,req,name=ns" json:"ns,omitempty"` + NsFile *string `protobuf:"bytes,2,req,name=ns_file,json=nsFile" json:"ns_file,omitempty"` + ExtraOpt *string `protobuf:"bytes,3,opt,name=extra_opt,json=extraOpt" json:"extra_opt,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *JoinNamespace) Reset() { *m = JoinNamespace{} } +func (m *JoinNamespace) String() string { return proto.CompactTextString(m) } +func (*JoinNamespace) ProtoMessage() {} +func (*JoinNamespace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *JoinNamespace) GetNs() string { + if m != nil && m.Ns != nil { + return *m.Ns + } + return "" +} + +func (m *JoinNamespace) GetNsFile() string { + if m != nil && m.NsFile != nil { + return *m.NsFile + } + return "" +} + +func (m *JoinNamespace) GetExtraOpt() string { + if m != nil && m.ExtraOpt != nil { + return *m.ExtraOpt + } + return "" +} + +type InheritFd struct { + Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` + Fd *int32 `protobuf:"varint,2,req,name=fd" json:"fd,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InheritFd) Reset() { *m = InheritFd{} } +func (m *InheritFd) String() string { return proto.CompactTextString(m) } +func (*InheritFd) ProtoMessage() {} +func (*InheritFd) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *InheritFd) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *InheritFd) GetFd() int32 { + if m != nil && m.Fd != nil { + return *m.Fd + } + return 0 +} + +type CgroupRoot struct { + Ctrl *string `protobuf:"bytes,1,opt,name=ctrl" json:"ctrl,omitempty"` + Path *string `protobuf:"bytes,2,req,name=path" json:"path,omitempty"` + 
XXX_unrecognized []byte `json:"-"` +} + +func (m *CgroupRoot) Reset() { *m = CgroupRoot{} } +func (m *CgroupRoot) String() string { return proto.CompactTextString(m) } +func (*CgroupRoot) ProtoMessage() {} +func (*CgroupRoot) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *CgroupRoot) GetCtrl() string { + if m != nil && m.Ctrl != nil { + return *m.Ctrl + } + return "" +} + +func (m *CgroupRoot) GetPath() string { + if m != nil && m.Path != nil { + return *m.Path + } + return "" +} + +type UnixSk struct { + Inode *uint32 `protobuf:"varint,1,req,name=inode" json:"inode,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UnixSk) Reset() { *m = UnixSk{} } +func (m *UnixSk) String() string { return proto.CompactTextString(m) } +func (*UnixSk) ProtoMessage() {} +func (*UnixSk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *UnixSk) GetInode() uint32 { + if m != nil && m.Inode != nil { + return *m.Inode + } + return 0 +} + +type CriuOpts struct { + ImagesDirFd *int32 `protobuf:"varint,1,req,name=images_dir_fd,json=imagesDirFd" json:"images_dir_fd,omitempty"` + Pid *int32 `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"` + LeaveRunning *bool `protobuf:"varint,3,opt,name=leave_running,json=leaveRunning" json:"leave_running,omitempty"` + ExtUnixSk *bool `protobuf:"varint,4,opt,name=ext_unix_sk,json=extUnixSk" json:"ext_unix_sk,omitempty"` + TcpEstablished *bool `protobuf:"varint,5,opt,name=tcp_established,json=tcpEstablished" json:"tcp_established,omitempty"` + EvasiveDevices *bool `protobuf:"varint,6,opt,name=evasive_devices,json=evasiveDevices" json:"evasive_devices,omitempty"` + ShellJob *bool `protobuf:"varint,7,opt,name=shell_job,json=shellJob" json:"shell_job,omitempty"` + FileLocks *bool `protobuf:"varint,8,opt,name=file_locks,json=fileLocks" json:"file_locks,omitempty"` + LogLevel *int32 `protobuf:"varint,9,opt,name=log_level,json=logLevel,def=2" json:"log_level,omitempty"` + LogFile *string `protobuf:"bytes,10,opt,name=log_file,json=logFile" json:"log_file,omitempty"` + Ps *CriuPageServerInfo `protobuf:"bytes,11,opt,name=ps" json:"ps,omitempty"` + NotifyScripts *bool `protobuf:"varint,12,opt,name=notify_scripts,json=notifyScripts" json:"notify_scripts,omitempty"` + Root *string `protobuf:"bytes,13,opt,name=root" json:"root,omitempty"` + ParentImg *string `protobuf:"bytes,14,opt,name=parent_img,json=parentImg" json:"parent_img,omitempty"` + TrackMem *bool `protobuf:"varint,15,opt,name=track_mem,json=trackMem" json:"track_mem,omitempty"` + AutoDedup *bool `protobuf:"varint,16,opt,name=auto_dedup,json=autoDedup" json:"auto_dedup,omitempty"` + WorkDirFd *int32 `protobuf:"varint,17,opt,name=work_dir_fd,json=workDirFd" json:"work_dir_fd,omitempty"` + LinkRemap *bool `protobuf:"varint,18,opt,name=link_remap,json=linkRemap" json:"link_remap,omitempty"` + Veths []*CriuVethPair `protobuf:"bytes,19,rep,name=veths" json:"veths,omitempty"` + CpuCap *uint32 `protobuf:"varint,20,opt,name=cpu_cap,json=cpuCap,def=4294967295" json:"cpu_cap,omitempty"` + ForceIrmap *bool `protobuf:"varint,21,opt,name=force_irmap,json=forceIrmap" json:"force_irmap,omitempty"` + ExecCmd []string `protobuf:"bytes,22,rep,name=exec_cmd,json=execCmd" json:"exec_cmd,omitempty"` + ExtMnt []*ExtMountMap `protobuf:"bytes,23,rep,name=ext_mnt,json=extMnt" json:"ext_mnt,omitempty"` + ManageCgroups *bool `protobuf:"varint,24,opt,name=manage_cgroups,json=manageCgroups" json:"manage_cgroups,omitempty"` + CgRoot []*CgroupRoot 
`protobuf:"bytes,25,rep,name=cg_root,json=cgRoot" json:"cg_root,omitempty"` + RstSibling *bool `protobuf:"varint,26,opt,name=rst_sibling,json=rstSibling" json:"rst_sibling,omitempty"` + InheritFd []*InheritFd `protobuf:"bytes,27,rep,name=inherit_fd,json=inheritFd" json:"inherit_fd,omitempty"` + AutoExtMnt *bool `protobuf:"varint,28,opt,name=auto_ext_mnt,json=autoExtMnt" json:"auto_ext_mnt,omitempty"` + ExtSharing *bool `protobuf:"varint,29,opt,name=ext_sharing,json=extSharing" json:"ext_sharing,omitempty"` + ExtMasters *bool `protobuf:"varint,30,opt,name=ext_masters,json=extMasters" json:"ext_masters,omitempty"` + SkipMnt []string `protobuf:"bytes,31,rep,name=skip_mnt,json=skipMnt" json:"skip_mnt,omitempty"` + EnableFs []string `protobuf:"bytes,32,rep,name=enable_fs,json=enableFs" json:"enable_fs,omitempty"` + UnixSkIno []*UnixSk `protobuf:"bytes,33,rep,name=unix_sk_ino,json=unixSkIno" json:"unix_sk_ino,omitempty"` + ManageCgroupsMode *CriuCgMode `protobuf:"varint,34,opt,name=manage_cgroups_mode,json=manageCgroupsMode,enum=CriuCgMode" json:"manage_cgroups_mode,omitempty"` + GhostLimit *uint32 `protobuf:"varint,35,opt,name=ghost_limit,json=ghostLimit,def=1048576" json:"ghost_limit,omitempty"` + IrmapScanPaths []string `protobuf:"bytes,36,rep,name=irmap_scan_paths,json=irmapScanPaths" json:"irmap_scan_paths,omitempty"` + External []string `protobuf:"bytes,37,rep,name=external" json:"external,omitempty"` + EmptyNs *uint32 `protobuf:"varint,38,opt,name=empty_ns,json=emptyNs" json:"empty_ns,omitempty"` + JoinNs []*JoinNamespace `protobuf:"bytes,39,rep,name=join_ns,json=joinNs" json:"join_ns,omitempty"` + CgroupProps *string `protobuf:"bytes,41,opt,name=cgroup_props,json=cgroupProps" json:"cgroup_props,omitempty"` + CgroupPropsFile *string `protobuf:"bytes,42,opt,name=cgroup_props_file,json=cgroupPropsFile" json:"cgroup_props_file,omitempty"` + CgroupDumpController []string `protobuf:"bytes,43,rep,name=cgroup_dump_controller,json=cgroupDumpController" json:"cgroup_dump_controller,omitempty"` + FreezeCgroup *string `protobuf:"bytes,44,opt,name=freeze_cgroup,json=freezeCgroup" json:"freeze_cgroup,omitempty"` + Timeout *uint32 `protobuf:"varint,45,opt,name=timeout" json:"timeout,omitempty"` + TcpSkipInFlight *bool `protobuf:"varint,46,opt,name=tcp_skip_in_flight,json=tcpSkipInFlight" json:"tcp_skip_in_flight,omitempty"` + WeakSysctls *bool `protobuf:"varint,47,opt,name=weak_sysctls,json=weakSysctls" json:"weak_sysctls,omitempty"` + LazyPages *bool `protobuf:"varint,48,opt,name=lazy_pages,json=lazyPages" json:"lazy_pages,omitempty"` + StatusFd *int32 `protobuf:"varint,49,opt,name=status_fd,json=statusFd" json:"status_fd,omitempty"` + OrphanPtsMaster *bool `protobuf:"varint,50,opt,name=orphan_pts_master,json=orphanPtsMaster" json:"orphan_pts_master,omitempty"` + ConfigFile *string `protobuf:"bytes,51,opt,name=config_file,json=configFile" json:"config_file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CriuOpts) Reset() { *m = CriuOpts{} } +func (m *CriuOpts) String() string { return proto.CompactTextString(m) } +func (*CriuOpts) ProtoMessage() {} +func (*CriuOpts) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +const Default_CriuOpts_LogLevel int32 = 2 +const Default_CriuOpts_CpuCap uint32 = 4294967295 +const Default_CriuOpts_GhostLimit uint32 = 1048576 + +func (m *CriuOpts) GetImagesDirFd() int32 { + if m != nil && m.ImagesDirFd != nil { + return *m.ImagesDirFd + } + return 0 +} + +func (m *CriuOpts) GetPid() int32 { + if m != nil && m.Pid != nil { + return 
*m.Pid + } + return 0 +} + +func (m *CriuOpts) GetLeaveRunning() bool { + if m != nil && m.LeaveRunning != nil { + return *m.LeaveRunning + } + return false +} + +func (m *CriuOpts) GetExtUnixSk() bool { + if m != nil && m.ExtUnixSk != nil { + return *m.ExtUnixSk + } + return false +} + +func (m *CriuOpts) GetTcpEstablished() bool { + if m != nil && m.TcpEstablished != nil { + return *m.TcpEstablished + } + return false +} + +func (m *CriuOpts) GetEvasiveDevices() bool { + if m != nil && m.EvasiveDevices != nil { + return *m.EvasiveDevices + } + return false +} + +func (m *CriuOpts) GetShellJob() bool { + if m != nil && m.ShellJob != nil { + return *m.ShellJob + } + return false +} + +func (m *CriuOpts) GetFileLocks() bool { + if m != nil && m.FileLocks != nil { + return *m.FileLocks + } + return false +} + +func (m *CriuOpts) GetLogLevel() int32 { + if m != nil && m.LogLevel != nil { + return *m.LogLevel + } + return Default_CriuOpts_LogLevel +} + +func (m *CriuOpts) GetLogFile() string { + if m != nil && m.LogFile != nil { + return *m.LogFile + } + return "" +} + +func (m *CriuOpts) GetPs() *CriuPageServerInfo { + if m != nil { + return m.Ps + } + return nil +} + +func (m *CriuOpts) GetNotifyScripts() bool { + if m != nil && m.NotifyScripts != nil { + return *m.NotifyScripts + } + return false +} + +func (m *CriuOpts) GetRoot() string { + if m != nil && m.Root != nil { + return *m.Root + } + return "" +} + +func (m *CriuOpts) GetParentImg() string { + if m != nil && m.ParentImg != nil { + return *m.ParentImg + } + return "" +} + +func (m *CriuOpts) GetTrackMem() bool { + if m != nil && m.TrackMem != nil { + return *m.TrackMem + } + return false +} + +func (m *CriuOpts) GetAutoDedup() bool { + if m != nil && m.AutoDedup != nil { + return *m.AutoDedup + } + return false +} + +func (m *CriuOpts) GetWorkDirFd() int32 { + if m != nil && m.WorkDirFd != nil { + return *m.WorkDirFd + } + return 0 +} + +func (m *CriuOpts) GetLinkRemap() bool { + if m != nil && m.LinkRemap != nil { + return *m.LinkRemap + } + return false +} + +func (m *CriuOpts) GetVeths() []*CriuVethPair { + if m != nil { + return m.Veths + } + return nil +} + +func (m *CriuOpts) GetCpuCap() uint32 { + if m != nil && m.CpuCap != nil { + return *m.CpuCap + } + return Default_CriuOpts_CpuCap +} + +func (m *CriuOpts) GetForceIrmap() bool { + if m != nil && m.ForceIrmap != nil { + return *m.ForceIrmap + } + return false +} + +func (m *CriuOpts) GetExecCmd() []string { + if m != nil { + return m.ExecCmd + } + return nil +} + +func (m *CriuOpts) GetExtMnt() []*ExtMountMap { + if m != nil { + return m.ExtMnt + } + return nil +} + +func (m *CriuOpts) GetManageCgroups() bool { + if m != nil && m.ManageCgroups != nil { + return *m.ManageCgroups + } + return false +} + +func (m *CriuOpts) GetCgRoot() []*CgroupRoot { + if m != nil { + return m.CgRoot + } + return nil +} + +func (m *CriuOpts) GetRstSibling() bool { + if m != nil && m.RstSibling != nil { + return *m.RstSibling + } + return false +} + +func (m *CriuOpts) GetInheritFd() []*InheritFd { + if m != nil { + return m.InheritFd + } + return nil +} + +func (m *CriuOpts) GetAutoExtMnt() bool { + if m != nil && m.AutoExtMnt != nil { + return *m.AutoExtMnt + } + return false +} + +func (m *CriuOpts) GetExtSharing() bool { + if m != nil && m.ExtSharing != nil { + return *m.ExtSharing + } + return false +} + +func (m *CriuOpts) GetExtMasters() bool { + if m != nil && m.ExtMasters != nil { + return *m.ExtMasters + } + return false +} + +func (m *CriuOpts) GetSkipMnt() []string { + if m != 
nil { + return m.SkipMnt + } + return nil +} + +func (m *CriuOpts) GetEnableFs() []string { + if m != nil { + return m.EnableFs + } + return nil +} + +func (m *CriuOpts) GetUnixSkIno() []*UnixSk { + if m != nil { + return m.UnixSkIno + } + return nil +} + +func (m *CriuOpts) GetManageCgroupsMode() CriuCgMode { + if m != nil && m.ManageCgroupsMode != nil { + return *m.ManageCgroupsMode + } + return CriuCgMode_IGNORE +} + +func (m *CriuOpts) GetGhostLimit() uint32 { + if m != nil && m.GhostLimit != nil { + return *m.GhostLimit + } + return Default_CriuOpts_GhostLimit +} + +func (m *CriuOpts) GetIrmapScanPaths() []string { + if m != nil { + return m.IrmapScanPaths + } + return nil +} + +func (m *CriuOpts) GetExternal() []string { + if m != nil { + return m.External + } + return nil +} + +func (m *CriuOpts) GetEmptyNs() uint32 { + if m != nil && m.EmptyNs != nil { + return *m.EmptyNs + } + return 0 +} + +func (m *CriuOpts) GetJoinNs() []*JoinNamespace { + if m != nil { + return m.JoinNs + } + return nil +} + +func (m *CriuOpts) GetCgroupProps() string { + if m != nil && m.CgroupProps != nil { + return *m.CgroupProps + } + return "" +} + +func (m *CriuOpts) GetCgroupPropsFile() string { + if m != nil && m.CgroupPropsFile != nil { + return *m.CgroupPropsFile + } + return "" +} + +func (m *CriuOpts) GetCgroupDumpController() []string { + if m != nil { + return m.CgroupDumpController + } + return nil +} + +func (m *CriuOpts) GetFreezeCgroup() string { + if m != nil && m.FreezeCgroup != nil { + return *m.FreezeCgroup + } + return "" +} + +func (m *CriuOpts) GetTimeout() uint32 { + if m != nil && m.Timeout != nil { + return *m.Timeout + } + return 0 +} + +func (m *CriuOpts) GetTcpSkipInFlight() bool { + if m != nil && m.TcpSkipInFlight != nil { + return *m.TcpSkipInFlight + } + return false +} + +func (m *CriuOpts) GetWeakSysctls() bool { + if m != nil && m.WeakSysctls != nil { + return *m.WeakSysctls + } + return false +} + +func (m *CriuOpts) GetLazyPages() bool { + if m != nil && m.LazyPages != nil { + return *m.LazyPages + } + return false +} + +func (m *CriuOpts) GetStatusFd() int32 { + if m != nil && m.StatusFd != nil { + return *m.StatusFd + } + return 0 +} + +func (m *CriuOpts) GetOrphanPtsMaster() bool { + if m != nil && m.OrphanPtsMaster != nil { + return *m.OrphanPtsMaster + } + return false +} + +func (m *CriuOpts) GetConfigFile() string { + if m != nil && m.ConfigFile != nil { + return *m.ConfigFile + } + return "" +} + +type CriuDumpResp struct { + Restored *bool `protobuf:"varint,1,opt,name=restored" json:"restored,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CriuDumpResp) Reset() { *m = CriuDumpResp{} } +func (m *CriuDumpResp) String() string { return proto.CompactTextString(m) } +func (*CriuDumpResp) ProtoMessage() {} +func (*CriuDumpResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *CriuDumpResp) GetRestored() bool { + if m != nil && m.Restored != nil { + return *m.Restored + } + return false +} + +type CriuRestoreResp struct { + Pid *int32 `protobuf:"varint,1,req,name=pid" json:"pid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CriuRestoreResp) Reset() { *m = CriuRestoreResp{} } +func (m *CriuRestoreResp) String() string { return proto.CompactTextString(m) } +func (*CriuRestoreResp) ProtoMessage() {} +func (*CriuRestoreResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *CriuRestoreResp) GetPid() int32 { + if m != nil && m.Pid != nil { + return *m.Pid + } + return 0 +} + +type 
CriuNotify struct { + Script *string `protobuf:"bytes,1,opt,name=script" json:"script,omitempty"` + Pid *int32 `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CriuNotify) Reset() { *m = CriuNotify{} } +func (m *CriuNotify) String() string { return proto.CompactTextString(m) } +func (*CriuNotify) ProtoMessage() {} +func (*CriuNotify) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *CriuNotify) GetScript() string { + if m != nil && m.Script != nil { + return *m.Script + } + return "" +} + +func (m *CriuNotify) GetPid() int32 { + if m != nil && m.Pid != nil { + return *m.Pid + } + return 0 +} + +// +// List of features which can queried via +// CRIU_REQ_TYPE__FEATURE_CHECK +type CriuFeatures struct { + MemTrack *bool `protobuf:"varint,1,opt,name=mem_track,json=memTrack" json:"mem_track,omitempty"` + LazyPages *bool `protobuf:"varint,2,opt,name=lazy_pages,json=lazyPages" json:"lazy_pages,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CriuFeatures) Reset() { *m = CriuFeatures{} } +func (m *CriuFeatures) String() string { return proto.CompactTextString(m) } +func (*CriuFeatures) ProtoMessage() {} +func (*CriuFeatures) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *CriuFeatures) GetMemTrack() bool { + if m != nil && m.MemTrack != nil { + return *m.MemTrack + } + return false +} + +func (m *CriuFeatures) GetLazyPages() bool { + if m != nil && m.LazyPages != nil { + return *m.LazyPages + } + return false +} + +type CriuReq struct { + Type *CriuReqType `protobuf:"varint,1,req,name=type,enum=CriuReqType" json:"type,omitempty"` + Opts *CriuOpts `protobuf:"bytes,2,opt,name=opts" json:"opts,omitempty"` + NotifySuccess *bool `protobuf:"varint,3,opt,name=notify_success,json=notifySuccess" json:"notify_success,omitempty"` + // + // When set service won't close the connection but + // will wait for more req-s to appear. Works not + // for all request types. + KeepOpen *bool `protobuf:"varint,4,opt,name=keep_open,json=keepOpen" json:"keep_open,omitempty"` + // + // 'features' can be used to query which features + // are supported by the installed criu/kernel + // via RPC. 
+ Features *CriuFeatures `protobuf:"bytes,5,opt,name=features" json:"features,omitempty"` + // 'pid' is used for WAIT_PID + Pid *uint32 `protobuf:"varint,6,opt,name=pid" json:"pid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CriuReq) Reset() { *m = CriuReq{} } +func (m *CriuReq) String() string { return proto.CompactTextString(m) } +func (*CriuReq) ProtoMessage() {} +func (*CriuReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *CriuReq) GetType() CriuReqType { + if m != nil && m.Type != nil { + return *m.Type + } + return CriuReqType_EMPTY +} + +func (m *CriuReq) GetOpts() *CriuOpts { + if m != nil { + return m.Opts + } + return nil +} + +func (m *CriuReq) GetNotifySuccess() bool { + if m != nil && m.NotifySuccess != nil { + return *m.NotifySuccess + } + return false +} + +func (m *CriuReq) GetKeepOpen() bool { + if m != nil && m.KeepOpen != nil { + return *m.KeepOpen + } + return false +} + +func (m *CriuReq) GetFeatures() *CriuFeatures { + if m != nil { + return m.Features + } + return nil +} + +func (m *CriuReq) GetPid() uint32 { + if m != nil && m.Pid != nil { + return *m.Pid + } + return 0 +} + +type CriuResp struct { + Type *CriuReqType `protobuf:"varint,1,req,name=type,enum=CriuReqType" json:"type,omitempty"` + Success *bool `protobuf:"varint,2,req,name=success" json:"success,omitempty"` + Dump *CriuDumpResp `protobuf:"bytes,3,opt,name=dump" json:"dump,omitempty"` + Restore *CriuRestoreResp `protobuf:"bytes,4,opt,name=restore" json:"restore,omitempty"` + Notify *CriuNotify `protobuf:"bytes,5,opt,name=notify" json:"notify,omitempty"` + Ps *CriuPageServerInfo `protobuf:"bytes,6,opt,name=ps" json:"ps,omitempty"` + CrErrno *int32 `protobuf:"varint,7,opt,name=cr_errno,json=crErrno" json:"cr_errno,omitempty"` + Features *CriuFeatures `protobuf:"bytes,8,opt,name=features" json:"features,omitempty"` + CrErrmsg *string `protobuf:"bytes,9,opt,name=cr_errmsg,json=crErrmsg" json:"cr_errmsg,omitempty"` + Version *CriuVersion `protobuf:"bytes,10,opt,name=version" json:"version,omitempty"` + Status *int32 `protobuf:"varint,11,opt,name=status" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CriuResp) Reset() { *m = CriuResp{} } +func (m *CriuResp) String() string { return proto.CompactTextString(m) } +func (*CriuResp) ProtoMessage() {} +func (*CriuResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *CriuResp) GetType() CriuReqType { + if m != nil && m.Type != nil { + return *m.Type + } + return CriuReqType_EMPTY +} + +func (m *CriuResp) GetSuccess() bool { + if m != nil && m.Success != nil { + return *m.Success + } + return false +} + +func (m *CriuResp) GetDump() *CriuDumpResp { + if m != nil { + return m.Dump + } + return nil +} + +func (m *CriuResp) GetRestore() *CriuRestoreResp { + if m != nil { + return m.Restore + } + return nil +} + +func (m *CriuResp) GetNotify() *CriuNotify { + if m != nil { + return m.Notify + } + return nil +} + +func (m *CriuResp) GetPs() *CriuPageServerInfo { + if m != nil { + return m.Ps + } + return nil +} + +func (m *CriuResp) GetCrErrno() int32 { + if m != nil && m.CrErrno != nil { + return *m.CrErrno + } + return 0 +} + +func (m *CriuResp) GetFeatures() *CriuFeatures { + if m != nil { + return m.Features + } + return nil +} + +func (m *CriuResp) GetCrErrmsg() string { + if m != nil && m.CrErrmsg != nil { + return *m.CrErrmsg + } + return "" +} + +func (m *CriuResp) GetVersion() *CriuVersion { + if m != nil { + return m.Version + } + return nil +} + 
+func (m *CriuResp) GetStatus() int32 { + if m != nil && m.Status != nil { + return *m.Status + } + return 0 +} + +// Answer for criu_req_type.VERSION requests +type CriuVersion struct { + Major *int32 `protobuf:"varint,1,req,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,req,name=minor" json:"minor,omitempty"` + Gitid *string `protobuf:"bytes,3,opt,name=gitid" json:"gitid,omitempty"` + Sublevel *int32 `protobuf:"varint,4,opt,name=sublevel" json:"sublevel,omitempty"` + Extra *int32 `protobuf:"varint,5,opt,name=extra" json:"extra,omitempty"` + Name *string `protobuf:"bytes,6,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CriuVersion) Reset() { *m = CriuVersion{} } +func (m *CriuVersion) String() string { return proto.CompactTextString(m) } +func (*CriuVersion) ProtoMessage() {} +func (*CriuVersion) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *CriuVersion) GetMajor() int32 { + if m != nil && m.Major != nil { + return *m.Major + } + return 0 +} + +func (m *CriuVersion) GetMinor() int32 { + if m != nil && m.Minor != nil { + return *m.Minor + } + return 0 +} + +func (m *CriuVersion) GetGitid() string { + if m != nil && m.Gitid != nil { + return *m.Gitid + } + return "" +} + +func (m *CriuVersion) GetSublevel() int32 { + if m != nil && m.Sublevel != nil { + return *m.Sublevel + } + return 0 +} + +func (m *CriuVersion) GetExtra() int32 { + if m != nil && m.Extra != nil { + return *m.Extra + } + return 0 +} + +func (m *CriuVersion) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func init() { + proto.RegisterType((*CriuPageServerInfo)(nil), "criu_page_server_info") + proto.RegisterType((*CriuVethPair)(nil), "criu_veth_pair") + proto.RegisterType((*ExtMountMap)(nil), "ext_mount_map") + proto.RegisterType((*JoinNamespace)(nil), "join_namespace") + proto.RegisterType((*InheritFd)(nil), "inherit_fd") + proto.RegisterType((*CgroupRoot)(nil), "cgroup_root") + proto.RegisterType((*UnixSk)(nil), "unix_sk") + proto.RegisterType((*CriuOpts)(nil), "criu_opts") + proto.RegisterType((*CriuDumpResp)(nil), "criu_dump_resp") + proto.RegisterType((*CriuRestoreResp)(nil), "criu_restore_resp") + proto.RegisterType((*CriuNotify)(nil), "criu_notify") + proto.RegisterType((*CriuFeatures)(nil), "criu_features") + proto.RegisterType((*CriuReq)(nil), "criu_req") + proto.RegisterType((*CriuResp)(nil), "criu_resp") + proto.RegisterType((*CriuVersion)(nil), "criu_version") + proto.RegisterEnum("CriuCgMode", CriuCgMode_name, CriuCgMode_value) + proto.RegisterEnum("CriuReqType", CriuReqType_name, CriuReqType_value) +} + +func init() { proto.RegisterFile("rpc/rpc.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1835 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xeb, 0x72, 0x5b, 0xb7, + 0x11, 0x0e, 0x29, 0xf1, 0x06, 0x5e, 0x7c, 0x0c, 0x5f, 0x02, 0xc7, 0xb5, 0xad, 0xd0, 0x51, 0xa2, + 0x2a, 0x2e, 0x93, 0x30, 0x76, 0x5c, 0x67, 0xda, 0x1f, 0x1e, 0x8a, 0x74, 0xd8, 0x48, 0x22, 0x07, + 0xa4, 0xdc, 0xc9, 0x2f, 0xcc, 0xd1, 0x39, 0x20, 0x05, 0xf3, 0xdc, 0x0a, 0x80, 0x8a, 0xe4, 0x97, + 0xe8, 0xbf, 0x3e, 0x57, 0xde, 0xa4, 0xaf, 0xd0, 0xd9, 0x05, 0x28, 0x4b, 0x49, 0x66, 0xd2, 0x7f, + 0xd8, 0x0f, 0xbb, 0xc0, 0xde, 0x77, 0x49, 0x5b, 0x17, 0xd1, 0x57, 0xba, 0x88, 0x7a, 0x85, 0xce, + 0x6d, 0xde, 0x5d, 0x92, 0x7b, 0x91, 0x56, 0x6b, 0x51, 0x84, 0x4b, 0x29, 0x8c, 0xd4, 0xe7, 0x52, + 0x0b, 0x95, 0x2d, 0x72, 0xca, 0x48, 0x2d, 
0x8c, 0x63, 0x2d, 0x8d, 0x61, 0xa5, 0x9d, 0xd2, 0x5e, + 0x83, 0x6f, 0x48, 0x4a, 0xc9, 0x76, 0x91, 0x6b, 0xcb, 0xca, 0x3b, 0xa5, 0xbd, 0x0a, 0xc7, 0x33, + 0x0d, 0xc8, 0x56, 0xa1, 0x62, 0xb6, 0x85, 0x10, 0x1c, 0x69, 0x87, 0x94, 0x17, 0x31, 0xdb, 0x46, + 0xa0, 0xbc, 0x88, 0xbb, 0x7f, 0x23, 0x1d, 0xfc, 0xe8, 0x5c, 0xda, 0x33, 0x51, 0x84, 0x4a, 0xd3, + 0x3b, 0xa4, 0xa2, 0x16, 0x42, 0x65, 0xac, 0xb4, 0x53, 0xde, 0x6b, 0xf0, 0x6d, 0xb5, 0x18, 0x67, + 0xf4, 0x1e, 0xa9, 0xaa, 0x85, 0xc8, 0xd7, 0xf0, 0x3c, 0xa0, 0x15, 0xb5, 0x98, 0xac, 0x6d, 0xf7, + 0x5b, 0xd2, 0x96, 0x17, 0x56, 0xa4, 0xf9, 0x3a, 0xb3, 0x22, 0x0d, 0x0b, 0xf8, 0x70, 0x25, 0x2f, + 0xbd, 0x28, 0x1c, 0x01, 0x39, 0x0f, 0x13, 0x2f, 0x06, 0xc7, 0xee, 0x5b, 0xd2, 0x79, 0x97, 0xab, + 0x4c, 0x64, 0x61, 0x2a, 0x4d, 0x11, 0x46, 0x12, 0x94, 0xca, 0x8c, 0x17, 0x2a, 0x67, 0x86, 0x7e, + 0x4c, 0x6a, 0x99, 0x11, 0x0b, 0x95, 0x48, 0x2f, 0x57, 0xcd, 0xcc, 0x48, 0x25, 0x92, 0x3e, 0x24, + 0x0d, 0x79, 0x61, 0x75, 0x28, 0xf2, 0xc2, 0xa2, 0x55, 0x0d, 0x5e, 0x47, 0x60, 0x52, 0xd8, 0x6e, + 0x8f, 0x10, 0x95, 0x9d, 0x49, 0xad, 0xac, 0x58, 0xc4, 0xbf, 0xa3, 0x89, 0x33, 0x1d, 0x1e, 0x74, + 0xa6, 0xbf, 0x20, 0xcd, 0x68, 0xa9, 0xf3, 0x75, 0x21, 0x74, 0x9e, 0x5b, 0xf0, 0x5f, 0x64, 0x75, + 0xe2, 0xdd, 0x8a, 0x67, 0xf4, 0x69, 0x68, 0xcf, 0xbc, 0x16, 0x78, 0xee, 0x3e, 0x21, 0xb5, 0x75, + 0xa6, 0x2e, 0x84, 0x59, 0xd1, 0xbb, 0xa4, 0xa2, 0xb2, 0x3c, 0x96, 0xf8, 0x4b, 0x9b, 0x3b, 0xa2, + 0xfb, 0xdf, 0x36, 0x69, 0xa0, 0x4f, 0xf3, 0xc2, 0x1a, 0xda, 0x25, 0x6d, 0x95, 0x86, 0x4b, 0x69, + 0x44, 0xac, 0xb4, 0x58, 0xc4, 0xc8, 0x5b, 0xe1, 0x4d, 0x07, 0x1e, 0x28, 0x3d, 0x8a, 0x37, 0x61, + 0x2a, 0x7f, 0x08, 0xd3, 0x53, 0xd2, 0x4e, 0x64, 0x78, 0x2e, 0x85, 0x5e, 0x67, 0x99, 0xca, 0x96, + 0x68, 0x6c, 0x9d, 0xb7, 0x10, 0xe4, 0x0e, 0xa3, 0x8f, 0x49, 0x13, 0xbc, 0xef, 0xb5, 0xc1, 0xa0, + 0xd6, 0x39, 0x38, 0xe8, 0x24, 0x53, 0x17, 0xb3, 0x15, 0xfd, 0x82, 0xdc, 0xb2, 0x51, 0x21, 0xa4, + 0xb1, 0xe1, 0x69, 0xa2, 0xcc, 0x99, 0x8c, 0x59, 0x05, 0x79, 0x3a, 0x36, 0x2a, 0x86, 0x1f, 0x50, + 0x60, 0x94, 0xe7, 0xa1, 0x51, 0xe7, 0x52, 0xc4, 0xf2, 0x5c, 0x45, 0xd2, 0xb0, 0xaa, 0x63, 0xf4, + 0xf0, 0x81, 0x43, 0xc1, 0xff, 0xe6, 0x4c, 0x26, 0x89, 0x78, 0x97, 0x9f, 0xb2, 0x1a, 0xb2, 0xd4, + 0x11, 0xf8, 0x47, 0x7e, 0x4a, 0x1f, 0x11, 0x02, 0x21, 0x13, 0x49, 0x1e, 0xad, 0x0c, 0xab, 0x3b, + 0x6d, 0x00, 0x39, 0x04, 0x80, 0x3e, 0x26, 0x8d, 0x24, 0x5f, 0x8a, 0x44, 0x9e, 0xcb, 0x84, 0x35, + 0xc0, 0xd4, 0xef, 0x4b, 0x7d, 0x5e, 0x4f, 0xf2, 0xe5, 0x21, 0x40, 0xf4, 0x01, 0x81, 0xb3, 0x8b, + 0x3a, 0x71, 0xa9, 0x9d, 0xe4, 0x4b, 0x0c, 0xfb, 0xe7, 0xa4, 0x5c, 0x18, 0xd6, 0xdc, 0x29, 0xed, + 0x35, 0xfb, 0xf7, 0x7b, 0xbf, 0x5b, 0x18, 0xbc, 0x5c, 0x18, 0xba, 0x4b, 0x3a, 0x59, 0x6e, 0xd5, + 0xe2, 0x52, 0x98, 0x48, 0xab, 0xc2, 0x1a, 0xd6, 0x42, 0x2d, 0xda, 0x0e, 0x9d, 0x39, 0x10, 0xa2, + 0x0a, 0x11, 0x67, 0x6d, 0x17, 0x69, 0x8c, 0xfe, 0x23, 0x42, 0x8a, 0x50, 0xcb, 0xcc, 0x0a, 0x95, + 0x2e, 0x59, 0x07, 0x6f, 0x1a, 0x0e, 0x19, 0xa7, 0x4b, 0x30, 0xdc, 0xea, 0x30, 0x5a, 0x89, 0x54, + 0xa6, 0xec, 0x96, 0x33, 0x1c, 0x81, 0x23, 0x99, 0x82, 0x6c, 0xb8, 0xb6, 0xb9, 0x88, 0x65, 0xbc, + 0x2e, 0x58, 0xe0, 0x0c, 0x07, 0xe4, 0x00, 0x00, 0x08, 0xd3, 0xcf, 0xb9, 0x5e, 0x6d, 0xe2, 0x7f, + 0x1b, 0xa3, 0xdc, 0x00, 0xc8, 0x45, 0xff, 0x11, 0x21, 0x89, 0xca, 0x56, 0x42, 0xcb, 0x34, 0x2c, + 0x18, 0x75, 0xe2, 0x80, 0x70, 0x00, 0xe8, 0x2e, 0xa9, 0x40, 0x71, 0x1a, 0x76, 0x67, 0x67, 0x6b, + 0xaf, 0xd9, 0xbf, 0xd5, 0xbb, 0x59, 0xaf, 0xdc, 0xdd, 0xd2, 0xa7, 0xa4, 0x16, 0x15, 0x6b, 0x11, + 0x85, 0x05, 0xbb, 0xbb, 0x53, 0xda, 0x6b, 0x7f, 0x4f, 0x9e, 0xf7, 
0x5f, 0x3d, 0x7f, 0xf5, 0xdd, + 0xcb, 0xfe, 0xab, 0x17, 0xbc, 0x1a, 0x15, 0xeb, 0x41, 0x58, 0xd0, 0x27, 0xa4, 0xb9, 0xc8, 0x75, + 0x24, 0x85, 0xd2, 0xf0, 0xd7, 0x3d, 0xfc, 0x8b, 0x20, 0x34, 0x06, 0x04, 0x82, 0x20, 0x2f, 0x64, + 0x24, 0xa2, 0x34, 0x66, 0xf7, 0x77, 0xb6, 0x20, 0x08, 0x40, 0x0f, 0x52, 0x48, 0x92, 0x1a, 0xd6, + 0x7a, 0x66, 0xd9, 0xc7, 0xa8, 0x49, 0xa7, 0x77, 0xa3, 0xf6, 0x79, 0x55, 0x5e, 0xd8, 0xa3, 0xcc, + 0x42, 0x14, 0xd2, 0x30, 0x83, 0xf8, 0xb8, 0xf2, 0x32, 0x8c, 0xb9, 0x28, 0x38, 0x74, 0xe0, 0x40, + 0xba, 0x4b, 0x6a, 0xd1, 0x12, 0x4b, 0x8f, 0x3d, 0xc0, 0xf7, 0x5a, 0xbd, 0x6b, 0xe5, 0xc8, 0xab, + 0xd1, 0x92, 0x43, 0x60, 0x9e, 0x90, 0xa6, 0x36, 0x56, 0x18, 0x75, 0x9a, 0x40, 0x1d, 0x7c, 0xe2, + 0x54, 0xd6, 0xc6, 0xce, 0x1c, 0x42, 0xf7, 0xaf, 0x97, 0x3d, 0x7b, 0x88, 0x4f, 0x35, 0x7b, 0x1f, + 0x20, 0xde, 0xf0, 0xe7, 0x51, 0x4c, 0x77, 0x48, 0x0b, 0x23, 0xb5, 0x31, 0xe4, 0x4f, 0xee, 0x35, + 0xc0, 0x86, 0x4e, 0xf9, 0x27, 0xae, 0xa6, 0xcc, 0x59, 0xa8, 0xe1, 0xbb, 0x47, 0x8e, 0x41, 0x5e, + 0xd8, 0x99, 0x43, 0x36, 0x0c, 0x69, 0x68, 0xac, 0xd4, 0x86, 0x3d, 0xbe, 0x62, 0x38, 0x72, 0x08, + 0xb8, 0xd0, 0xac, 0x54, 0x81, 0xef, 0x3f, 0x71, 0x2e, 0x04, 0x1a, 0x1e, 0x87, 0xf6, 0x95, 0x85, + 0xa7, 0x89, 0x14, 0x0b, 0xc3, 0x76, 0xf0, 0xae, 0xee, 0x80, 0x91, 0xa1, 0x7b, 0xa4, 0xe9, 0x2b, + 0x59, 0xa8, 0x2c, 0x67, 0x9f, 0xa2, 0x21, 0xf5, 0x9e, 0xc7, 0x78, 0x63, 0x8d, 0x45, 0x3d, 0xce, + 0x72, 0xfa, 0x77, 0x72, 0xe7, 0xa6, 0x83, 0x45, 0x0a, 0x4d, 0xa8, 0xbb, 0x53, 0xda, 0xeb, 0xf4, + 0xdb, 0x2e, 0x3f, 0xa2, 0x25, 0x82, 0xfc, 0xf6, 0x0d, 0xa7, 0x1f, 0xe5, 0xb1, 0x84, 0x8f, 0x96, + 0x67, 0xb9, 0xb1, 0x22, 0x51, 0xa9, 0xb2, 0xec, 0x29, 0x66, 0x4b, 0xed, 0x9b, 0xaf, 0x9f, 0xff, + 0xf5, 0xc5, 0xcb, 0xef, 0x38, 0xc1, 0xbb, 0x43, 0xb8, 0xa2, 0x7b, 0x24, 0xc0, 0x44, 0x11, 0x26, + 0x0a, 0x33, 0x01, 0xdd, 0xcf, 0xb0, 0xcf, 0x50, 0xed, 0x0e, 0xe2, 0xb3, 0x28, 0xcc, 0xa6, 0x80, + 0xd2, 0x4f, 0x20, 0x6f, 0xac, 0xd4, 0x59, 0x98, 0xb0, 0x5d, 0x6f, 0x98, 0xa7, 0x31, 0xa7, 0xd2, + 0xc2, 0x5e, 0x8a, 0xcc, 0xb0, 0xcf, 0xe1, 0x33, 0x5e, 0x43, 0xfa, 0x18, 0x6c, 0xae, 0xb9, 0x51, + 0x60, 0xd8, 0x17, 0x3e, 0xbb, 0x6f, 0x8e, 0x06, 0x5e, 0x05, 0xfa, 0xd8, 0xd0, 0x4f, 0x49, 0xcb, + 0x67, 0x47, 0xa1, 0xf3, 0xc2, 0xb0, 0x3f, 0x63, 0x85, 0xfa, 0x06, 0x3e, 0x05, 0x88, 0xee, 0x93, + 0xdb, 0xd7, 0x59, 0x5c, 0x27, 0xd9, 0x47, 0xbe, 0x5b, 0xd7, 0xf8, 0xb0, 0xa3, 0x3c, 0x27, 0xf7, + 0x3d, 0x6f, 0xbc, 0x4e, 0x0b, 0x11, 0xe5, 0x99, 0xd5, 0x79, 0x92, 0x48, 0xcd, 0xbe, 0x44, 0xed, + 0xef, 0xba, 0xdb, 0x83, 0x75, 0x5a, 0x0c, 0xae, 0xee, 0xa0, 0x2b, 0x2f, 0xb4, 0x94, 0xef, 0x37, + 0x8e, 0x67, 0xcf, 0xf0, 0xf5, 0x96, 0x03, 0x9d, 0x8f, 0x61, 0x42, 0x5b, 0x95, 0x4a, 0x98, 0x95, + 0x7f, 0x71, 0xd6, 0x7a, 0x92, 0x7e, 0x49, 0x28, 0xf4, 0x63, 0xcc, 0x0e, 0x95, 0x89, 0x45, 0xa2, + 0x96, 0x67, 0x96, 0xf5, 0x30, 0x83, 0xa0, 0x53, 0xcf, 0x56, 0xaa, 0x18, 0x67, 0x23, 0x84, 0xc1, + 0xe0, 0x9f, 0x65, 0xb8, 0x12, 0xe6, 0xd2, 0x44, 0x36, 0x31, 0xec, 0x2b, 0x64, 0x6b, 0x02, 0x36, + 0x73, 0x10, 0x36, 0x8e, 0xf0, 0xfd, 0x25, 0xf6, 0x42, 0xc3, 0xbe, 0xf6, 0x8d, 0x23, 0x7c, 0x7f, + 0x39, 0x05, 0x00, 0x9b, 0xb5, 0x0d, 0xed, 0xda, 0x40, 0x5d, 0x7c, 0x83, 0x5d, 0xa7, 0xee, 0x80, + 0x51, 0x0c, 0xce, 0xca, 0x75, 0x71, 0x06, 0x61, 0xb5, 0xc6, 0x67, 0x33, 0xeb, 0x3b, 0x55, 0xdc, + 0xc5, 0xd4, 0x1a, 0x97, 0xd2, 0x90, 0xf2, 0x51, 0x9e, 0x2d, 0x94, 0x6f, 0xce, 0xdf, 0xa2, 0xd1, + 0xc4, 0x41, 0xe0, 0xcd, 0xee, 0x33, 0xbf, 0x44, 0xa0, 0x2f, 0xb5, 0x34, 0x05, 0xe4, 0x83, 0x96, + 0xc6, 0xe6, 0x5a, 0xc6, 0x38, 0x50, 0xeb, 0xfc, 0x8a, 0xee, 0xee, 0x92, 0xdb, 0xc8, 0xed, 
0x01, + 0x27, 0xe0, 0x47, 0xa0, 0x1b, 0x8e, 0x70, 0xec, 0xbe, 0x24, 0x4d, 0x64, 0x73, 0xbd, 0x9b, 0xde, + 0x27, 0x55, 0xd7, 0xd4, 0xfd, 0x80, 0xf6, 0xd4, 0x6f, 0x67, 0x67, 0xf7, 0x47, 0xd2, 0x46, 0xc1, + 0x85, 0x0c, 0xed, 0x5a, 0x3b, 0x47, 0xa4, 0x32, 0x15, 0xd8, 0xaf, 0x37, 0xda, 0xa4, 0x32, 0x9d, + 0x03, 0xfd, 0x2b, 0x27, 0x96, 0x7f, 0xe5, 0xc4, 0xee, 0x2f, 0x25, 0x52, 0xf7, 0xda, 0xfe, 0x8b, + 0x76, 0xc9, 0xb6, 0xbd, 0x2c, 0xdc, 0xb8, 0xef, 0xf4, 0x3b, 0xbd, 0xcd, 0x85, 0x00, 0x94, 0xe3, + 0x1d, 0x7d, 0x4c, 0xb6, 0x61, 0xee, 0xe3, 0x4b, 0xcd, 0x3e, 0xe9, 0x5d, 0x6d, 0x02, 0x1c, 0xf1, + 0xeb, 0x33, 0x6a, 0x1d, 0x45, 0xb0, 0xc7, 0x6d, 0xdd, 0x98, 0x51, 0x0e, 0x04, 0x9d, 0x57, 0x52, + 0x16, 0x22, 0x2f, 0x64, 0xe6, 0x27, 0x7b, 0x1d, 0x80, 0x49, 0x21, 0x33, 0xba, 0x4f, 0xea, 0x1b, + 0xe3, 0x70, 0xa2, 0x37, 0x37, 0xba, 0x6c, 0x50, 0x7e, 0x75, 0xbf, 0xf1, 0x4f, 0x15, 0x53, 0x11, + 0xfd, 0xf3, 0xef, 0x2d, 0xbf, 0x9f, 0xa0, 0xe3, 0xff, 0x1f, 0x9b, 0x18, 0xa9, 0x6d, 0x94, 0x85, + 0x4d, 0xa8, 0xce, 0x37, 0x24, 0x7d, 0x4a, 0xb6, 0x21, 0xe8, 0x68, 0xc3, 0xd5, 0x6c, 0xba, 0x4a, + 0x03, 0x8e, 0x97, 0xf4, 0x19, 0xa9, 0xf9, 0x58, 0xa3, 0x25, 0xcd, 0x3e, 0xed, 0xfd, 0x26, 0x01, + 0xf8, 0x86, 0x85, 0x7e, 0x46, 0xaa, 0xce, 0x15, 0xde, 0xb4, 0x56, 0xef, 0x5a, 0x1a, 0x70, 0x7f, + 0xe7, 0x57, 0x82, 0xea, 0x1f, 0xae, 0x04, 0x0f, 0x20, 0x7c, 0x42, 0x6a, 0x9d, 0xe5, 0xb8, 0xb0, + 0x54, 0x78, 0x2d, 0xd2, 0x43, 0x20, 0x6f, 0x78, 0xb1, 0xfe, 0x07, 0x5e, 0x7c, 0x08, 0x2e, 0x83, + 0x67, 0x52, 0xb3, 0xc4, 0xe5, 0xa5, 0xc1, 0xeb, 0xf8, 0x4e, 0x6a, 0x96, 0x30, 0x19, 0xcf, 0xa5, + 0x36, 0x2a, 0xcf, 0x70, 0x71, 0x69, 0x6e, 0x7a, 0xb0, 0x07, 0xf9, 0xe6, 0x16, 0x73, 0x18, 0x0b, + 0x10, 0x77, 0x99, 0x0a, 0xf7, 0x54, 0xf7, 0x3f, 0x25, 0xd2, 0xba, 0x2e, 0x01, 0x8b, 0x65, 0x1a, + 0xbe, 0xcb, 0xb5, 0xaf, 0x07, 0x47, 0x20, 0xaa, 0xb2, 0x5c, 0xfb, 0x1d, 0xd6, 0x11, 0x80, 0x2e, + 0x95, 0xf5, 0x5b, 0x7e, 0x83, 0x3b, 0x02, 0x0a, 0xd0, 0xac, 0x4f, 0xdd, 0xb2, 0xb5, 0xed, 0x6b, + 0xdf, 0xd3, 0x20, 0x81, 0x4b, 0x33, 0x3a, 0xb8, 0xc2, 0x1d, 0x01, 0x5b, 0x11, 0xb4, 0x5d, 0xf4, + 0x69, 0x83, 0xe3, 0x79, 0x5f, 0x78, 0xbd, 0xfc, 0x34, 0xa1, 0x84, 0x54, 0xc7, 0x6f, 0x8e, 0x27, + 0x7c, 0x18, 0x7c, 0x44, 0x9b, 0xa4, 0x36, 0x78, 0x23, 0x8e, 0x27, 0xc7, 0xc3, 0xa0, 0x44, 0x1b, + 0xa4, 0x32, 0xe5, 0x93, 0xe9, 0x2c, 0x28, 0xd3, 0x3a, 0xd9, 0x9e, 0x4d, 0x46, 0xf3, 0x60, 0x0b, + 0x4e, 0xa3, 0x93, 0xc3, 0xc3, 0x60, 0x1b, 0xe4, 0x66, 0x73, 0x3e, 0x1e, 0xcc, 0x83, 0x0a, 0xc8, + 0x1d, 0x0c, 0x47, 0xaf, 0x4f, 0x0e, 0xe7, 0x41, 0x75, 0xff, 0x97, 0x92, 0x2f, 0xd6, 0x4d, 0xc6, + 0xc1, 0x4b, 0xc3, 0xa3, 0xe9, 0xfc, 0xa7, 0xe0, 0x23, 0x90, 0x3f, 0x38, 0x39, 0x9a, 0x06, 0x25, + 0x90, 0xe1, 0xc3, 0xd9, 0x1c, 0x3e, 0x2e, 0x03, 0xc7, 0xe0, 0x87, 0xe1, 0xe0, 0xc7, 0x60, 0x8b, + 0xb6, 0x48, 0x7d, 0xca, 0x87, 0x02, 0xb9, 0xb6, 0xe9, 0x2d, 0xd2, 0x9c, 0xbe, 0x7e, 0x33, 0x14, + 0xb3, 0x21, 0x7f, 0x3b, 0xe4, 0x41, 0x05, 0xbe, 0x3d, 0x9e, 0xcc, 0xc7, 0xa3, 0x9f, 0x82, 0x2a, + 0x0d, 0x48, 0x6b, 0x30, 0x3d, 0x19, 0x1f, 0x8f, 0x26, 0x8e, 0xbd, 0x46, 0x6f, 0x93, 0xf6, 0x06, + 0x71, 0xef, 0xd5, 0x01, 0x1a, 0x0d, 0x5f, 0xcf, 0x4f, 0xf8, 0xd0, 0x43, 0x0d, 0xf8, 0xfa, 0xed, + 0x90, 0xcf, 0xc6, 0x93, 0xe3, 0x80, 0xc0, 0x7f, 0xff, 0x7c, 0x3d, 0x9e, 0x8b, 0xe9, 0xf8, 0x20, + 0x68, 0xd2, 0xbb, 0x24, 0xb8, 0xf6, 0x9f, 0x18, 0xfc, 0x70, 0x78, 0x10, 0xb4, 0xfe, 0x17, 0x00, + 0x00, 0xff, 0xff, 0xf8, 0x9f, 0x0e, 0x7d, 0xca, 0x0d, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/events/container.pb.go 
b/vendor/github.com/containerd/containerd/api/events/container.pb.go new file mode 100644 index 000000000..c89d97f3e --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/container.pb.go @@ -0,0 +1,1238 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/container.proto + +/* + Package events is a generated protocol buffer package. + + It is generated from these files: + github.com/containerd/containerd/api/events/container.proto + github.com/containerd/containerd/api/events/content.proto + github.com/containerd/containerd/api/events/image.proto + github.com/containerd/containerd/api/events/namespace.proto + github.com/containerd/containerd/api/events/snapshot.proto + github.com/containerd/containerd/api/events/task.proto + + It has these top-level messages: + ContainerCreate + ContainerUpdate + ContainerDelete + ContentDelete + ImageCreate + ImageUpdate + ImageDelete + NamespaceCreate + NamespaceUpdate + NamespaceDelete + SnapshotPrepare + SnapshotCommit + SnapshotRemove + TaskCreate + TaskStart + TaskDelete + TaskIO + TaskExit + TaskOOM + TaskExecAdded + TaskExecStarted + TaskPaused + TaskResumed + TaskCheckpointed +*/ +package events + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/types" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin" + +import typeurl "github.com/containerd/typeurl" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ContainerCreate struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + Runtime *ContainerCreate_Runtime `protobuf:"bytes,3,opt,name=runtime" json:"runtime,omitempty"` +} + +func (m *ContainerCreate) Reset() { *m = ContainerCreate{} } +func (*ContainerCreate) ProtoMessage() {} +func (*ContainerCreate) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{0} } + +type ContainerCreate_Runtime struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Options *google_protobuf.Any `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (m *ContainerCreate_Runtime) Reset() { *m = ContainerCreate_Runtime{} } +func (*ContainerCreate_Runtime) ProtoMessage() {} +func (*ContainerCreate_Runtime) Descriptor() ([]byte, []int) { + return fileDescriptorContainer, []int{0, 0} +} + +type ContainerUpdate struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SnapshotKey string `protobuf:"bytes,4,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"` +} + +func (m *ContainerUpdate) Reset() { *m = ContainerUpdate{} } +func (*ContainerUpdate) ProtoMessage() {} +func (*ContainerUpdate) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{1} } + +type ContainerDelete struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *ContainerDelete) Reset() { *m = ContainerDelete{} } +func (*ContainerDelete) ProtoMessage() {} +func (*ContainerDelete) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{2} } + +func init() { + proto.RegisterType((*ContainerCreate)(nil), "containerd.events.ContainerCreate") + proto.RegisterType((*ContainerCreate_Runtime)(nil), "containerd.events.ContainerCreate.Runtime") + proto.RegisterType((*ContainerUpdate)(nil), "containerd.events.ContainerUpdate") + proto.RegisterType((*ContainerDelete)(nil), "containerd.events.ContainerDelete") +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ContainerCreate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "id": + return string(m.ID), len(m.ID) > 0 + case "image": + return string(m.Image), len(m.Image) > 0 + case "runtime": + // NOTE(stevvooe): This is probably not correct in many cases. + // We assume that the target message also implements the Field + // method, which isn't likely true in a lot of cases. + // + // If you have a broken build and have found this comment, + // you may be closer to a solution. + if m.Runtime == nil { + return "", false + } + + return m.Runtime.Field(fieldpath[1:]) + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. 
+func (m *ContainerCreate_Runtime) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + case "options": + decoded, err := typeurl.UnmarshalAny(m.Options) + if err != nil { + return "", false + } + + adaptor, ok := decoded.(interface{ Field([]string) (string, bool) }) + if !ok { + return "", false + } + return adaptor.Field(fieldpath[1:]) + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ContainerUpdate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "id": + return string(m.ID), len(m.ID) > 0 + case "image": + return string(m.Image), len(m.Image) > 0 + case "labels": + // Labels fields have been special-cased by name. If this breaks, + // add better special casing to fieldpath plugin. + if len(m.Labels) == 0 { + return "", false + } + value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] + return value, ok + case "snapshot_key": + return string(m.SnapshotKey), len(m.SnapshotKey) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ContainerDelete) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "id": + return string(m.ID), len(m.ID) > 0 + } + return "", false +} +func (m *ContainerCreate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerCreate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintContainer(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.Image) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintContainer(dAtA, i, uint64(len(m.Image))) + i += copy(dAtA[i:], m.Image) + } + if m.Runtime != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintContainer(dAtA, i, uint64(m.Runtime.Size())) + n1, err := m.Runtime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *ContainerCreate_Runtime) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerCreate_Runtime) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintContainer(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Options != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintContainer(dAtA, i, uint64(m.Options.Size())) + n2, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *ContainerUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerUpdate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintContainer(dAtA, i, uint64(len(m.ID))) + i += 
copy(dAtA[i:], m.ID) + } + if len(m.Image) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintContainer(dAtA, i, uint64(len(m.Image))) + i += copy(dAtA[i:], m.Image) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovContainer(uint64(len(k))) + 1 + len(v) + sovContainer(uint64(len(v))) + i = encodeVarintContainer(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintContainer(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintContainer(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.SnapshotKey) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintContainer(dAtA, i, uint64(len(m.SnapshotKey))) + i += copy(dAtA[i:], m.SnapshotKey) + } + return i, nil +} + +func (m *ContainerDelete) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerDelete) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintContainer(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil +} + +func encodeVarintContainer(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ContainerCreate) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovContainer(uint64(l)) + } + l = len(m.Image) + if l > 0 { + n += 1 + l + sovContainer(uint64(l)) + } + if m.Runtime != nil { + l = m.Runtime.Size() + n += 1 + l + sovContainer(uint64(l)) + } + return n +} + +func (m *ContainerCreate_Runtime) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovContainer(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovContainer(uint64(l)) + } + return n +} + +func (m *ContainerUpdate) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovContainer(uint64(l)) + } + l = len(m.Image) + if l > 0 { + n += 1 + l + sovContainer(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovContainer(uint64(len(k))) + 1 + len(v) + sovContainer(uint64(len(v))) + n += mapEntrySize + 1 + sovContainer(uint64(mapEntrySize)) + } + } + l = len(m.SnapshotKey) + if l > 0 { + n += 1 + l + sovContainer(uint64(l)) + } + return n +} + +func (m *ContainerDelete) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovContainer(uint64(l)) + } + return n +} + +func sovContainer(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozContainer(x uint64) (n int) { + return sovContainer(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ContainerCreate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerCreate{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Runtime:` + strings.Replace(fmt.Sprintf("%v", this.Runtime), "ContainerCreate_Runtime", "ContainerCreate_Runtime", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerCreate_Runtime) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerCreate_Runtime{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + 
`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerUpdate) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ContainerUpdate{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Labels:` + mapStringForLabels + `,`, + `SnapshotKey:` + fmt.Sprintf("%v", this.SnapshotKey) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerDelete) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerDelete{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func valueToStringContainer(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContainerCreate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerCreate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerCreate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainer + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainer + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { 
+ break + } + } + if msglen < 0 { + return ErrInvalidLengthContainer + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Runtime == nil { + m.Runtime = &ContainerCreate_Runtime{} + } + if err := m.Runtime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContainer(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerCreate_Runtime) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Runtime: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Runtime: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainer + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainer + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &google_protobuf.Any{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContainer(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
return fmt.Errorf("proto: ContainerUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainer + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainer + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainer + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthContainer + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthContainer + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = 
string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipContainer(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainer + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SnapshotKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainer + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SnapshotKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContainer(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerDelete) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerDelete: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerDelete: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainer + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContainer(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipContainer(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContainer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContainer + } + if iNdEx >= l { + 
return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContainer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthContainer + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContainer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipContainer(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthContainer = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowContainer = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/events/container.proto", fileDescriptorContainer) +} + +var fileDescriptorContainer = []byte{ + // 413 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x0a, 0xd3, 0x30, + 0x18, 0xc7, 0x97, 0x76, 0x6e, 0x98, 0x0a, 0x6a, 0x18, 0x52, 0x7b, 0xa8, 0x73, 0xa7, 0xe9, 0x21, + 0x85, 0x7a, 0x51, 0x77, 0xd1, 0x6d, 0x0a, 0xa2, 0x82, 0x14, 0x84, 0xe1, 0x45, 0xd2, 0x35, 0xeb, + 0x82, 0x6d, 0x52, 0xda, 0x74, 0xd0, 0x9b, 0x8f, 0xe2, 0xe3, 0xec, 0xe8, 0xc1, 0x83, 0x27, 0x71, + 0x05, 0xdf, 0xc0, 0x07, 0x90, 0x26, 0xeb, 0x56, 0x14, 0x95, 0x9d, 0xfa, 0xcf, 0xd7, 0xff, 0x3f, + 0xdf, 0xf7, 0xfb, 0x08, 0x9c, 0xc5, 0x4c, 0x6e, 0xcb, 0x10, 0xaf, 0x45, 0xea, 0xad, 0x05, 0x97, + 0x84, 0x71, 0x9a, 0x47, 0x5d, 0x49, 0x32, 0xe6, 0xd1, 0x1d, 0xe5, 0xb2, 0x38, 0x57, 0x71, 0x96, + 0x0b, 0x29, 0xd0, 0xcd, 0xb3, 0x0d, 0x6b, 0x8b, 0x73, 0x3b, 0x16, 0x22, 0x4e, 0xa8, 0xa7, 0x0c, + 0x61, 0xb9, 0xf1, 0x08, 0xaf, 0xb4, 0xdb, 0x19, 0xc5, 0x22, 0x16, 0x4a, 0x7a, 0x8d, 0x3a, 0x56, + 0x9f, 0xfc, 0x77, 0x80, 0xd3, 0x55, 0x59, 0x52, 0xc6, 0x8c, 0x7b, 0x1b, 0x46, 0x93, 0x28, 0x23, + 0x72, 0xab, 0x6f, 0x98, 0x7c, 0x01, 0xf0, 0xfa, 0xa2, 0xb5, 0x2f, 0x72, 0x4a, 0x24, 0x45, 0xb7, + 0xa0, 0xc1, 0x22, 0x1b, 0x8c, 0xc1, 0xf4, 0xea, 0x7c, 0x50, 0x7f, 0xbb, 0x63, 0xbc, 0x58, 0x06, + 0x06, 0x8b, 0xd0, 0x08, 0x5e, 0x61, 0x29, 0x89, 0xa9, 0x6d, 0x34, 0xbf, 0x02, 0x7d, 0x40, 0x4b, + 0x38, 0xcc, 0x4b, 0x2e, 0x59, 0x4a, 0x6d, 0x73, 0x0c, 0xa6, 0x96, 0x7f, 0x1f, 0xff, 0x41, 0x86, + 0x7f, 0x6b, 0x81, 0x03, 0x9d, 0x08, 0xda, 0xa8, 0xf3, 0x1a, 0x0e, 0x8f, 0x35, 0x84, 0x60, 0x9f, + 0x93, 0x94, 0xea, 0x01, 0x02, 0xa5, 0x11, 0x86, 0x43, 0x91, 0x49, 0x26, 0x78, 0xa1, 0x9a, 0x5b, + 0xfe, 0x08, 0xeb, 0x5d, 0xe1, 0x16, 0x10, 0x3f, 0xe5, 0x55, 0xd0, 0x9a, 0x26, 0x3f, 0xba, 0x58, + 0x6f, 0xb3, 0xe8, 0x72, 0xac, 0xe7, 0x70, 0x90, 0x90, 0x90, 0x26, 0x85, 0x6d, 0x8e, 0xcd, 0xa9, + 0xe5, 0xe3, 0x7f, 0x51, 0xe9, 0x0e, 0xf8, 0x95, 0x0a, 0x3c, 0xe3, 0x32, 0xaf, 0x82, 0x63, 0x1a, + 0xdd, 0x85, 0xd7, 0x0a, 0x4e, 0xb2, 0x62, 0x2b, 0xe4, 0xfb, 0x0f, 
0xb4, 0xb2, 0xfb, 0xaa, 0x89, + 0xd5, 0xd6, 0x5e, 0xd2, 0xca, 0x79, 0x04, 0xad, 0x4e, 0x12, 0xdd, 0x80, 0x66, 0x63, 0xd4, 0xf8, + 0x8d, 0x6c, 0x26, 0xdc, 0x91, 0xa4, 0x3c, 0x4d, 0xa8, 0x0e, 0x8f, 0x8d, 0x87, 0x60, 0x72, 0xaf, + 0x83, 0xb9, 0xa4, 0x09, 0xfd, 0x3b, 0xe6, 0xfc, 0xcd, 0xfe, 0xe0, 0xf6, 0xbe, 0x1e, 0xdc, 0xde, + 0xc7, 0xda, 0x05, 0xfb, 0xda, 0x05, 0x9f, 0x6b, 0x17, 0x7c, 0xaf, 0x5d, 0xf0, 0xe9, 0xa7, 0x0b, + 0xde, 0xf9, 0x17, 0x3c, 0xe5, 0x99, 0xfe, 0xac, 0xc0, 0xca, 0x08, 0x07, 0x6a, 0xff, 0x0f, 0x7e, + 0x05, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x09, 0xe0, 0xd6, 0x0b, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/events/container.proto b/vendor/github.com/containerd/containerd/api/events/container.proto new file mode 100644 index 000000000..13aa5848c --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/container.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package containerd.events; + +import "google/protobuf/any.proto"; +import weak "gogoproto/gogo.proto"; +import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; + +option go_package = "github.com/containerd/containerd/api/events;events"; +option (containerd.plugin.fieldpath_all) = true; + +message ContainerCreate { + string id = 1; + string image = 2; + message Runtime { + string name = 1; + google.protobuf.Any options = 2; + } + Runtime runtime = 3; +} + +message ContainerUpdate { + string id = 1; + string image = 2; + map<string, string> labels = 3; + string snapshot_key = 4; +} + +message ContainerDelete { + string id = 1; +} diff --git a/vendor/github.com/containerd/containerd/api/events/content.pb.go b/vendor/github.com/containerd/containerd/api/events/content.pb.go new file mode 100644 index 000000000..87648d193 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/content.pb.go @@ -0,0 +1,329 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/content.proto + +package events + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin" + +import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ContentDelete struct { + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` +} + +func (m *ContentDelete) Reset() { *m = ContentDelete{} } +func (*ContentDelete) ProtoMessage() {} +func (*ContentDelete) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{0} } + +func init() { + proto.RegisterType((*ContentDelete)(nil), "containerd.events.ContentDelete") +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false.
+func (m *ContentDelete) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "digest": + return string(m.Digest), len(m.Digest) > 0 + } + return "", false +} +func (m *ContentDelete) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContentDelete) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Digest) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) + } + return i, nil +} + +func encodeVarintContent(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ContentDelete) Size() (n int) { + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + return n +} + +func sovContent(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozContent(x uint64) (n int) { + return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ContentDelete) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContentDelete{`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `}`, + }, "") + return s +} +func valueToStringContent(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContentDelete) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContentDelete: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContentDelete: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipContent(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContent + } + 
if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthContent + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipContent(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowContent = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/events/content.proto", fileDescriptorContent) +} + +var fileDescriptorContent = []byte{ + // 228 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0x83, 0x45, 0x53, + 0xf3, 0x4a, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x04, 0x11, 0x8a, 0xf4, 0x20, 0x0a, 0xa4, + 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xb2, 0xfa, 0x20, 0x16, 0x44, 0xa1, 0x94, 0x03, 0x41, 0x3b, + 0xc0, 0xea, 0x92, 0x4a, 0xd3, 0xf4, 0x0b, 0x72, 0x4a, 0xd3, 0x33, 0xf3, 0xf4, 0xd3, 0x32, 0x53, + 0x73, 0x52, 0x0a, 0x12, 0x4b, 0x32, 0x20, 0x26, 0x28, 0x45, 0x73, 0xf1, 0x3a, 0x43, 0xec, 0x76, + 0x49, 0xcd, 0x49, 0x2d, 0x49, 0x15, 0xf2, 0xe2, 0x62, 0x4b, 0xc9, 0x4c, 0x4f, 0x2d, 0x2e, 0x91, + 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x74, 0x32, 0x3a, 0x71, 0x4f, 0x9e, 0xe1, 0xd6, 0x3d, 0x79, 0x2d, + 0x24, 0xab, 0xf2, 0x0b, 0x52, 0xf3, 0xe0, 0x76, 0x14, 0xeb, 0xa7, 0xe7, 0xeb, 0x42, 0xb4, 0xe8, + 0xb9, 0x80, 0xa9, 0x20, 0xa8, 0x09, 0x4e, 0x01, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, + 0xd0, 0xf0, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, + 0x63, 0x5c, 0xf0, 0x45, 0x8e, 0x31, 0xca, 0x88, 0x84, 0x00, 0xb2, 0x86, 0x50, 0x11, 0x0c, 0x11, + 0x8c, 0x49, 0x6c, 0x60, 0x97, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x78, 0x99, 0xee, + 0x61, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/events/content.proto b/vendor/github.com/containerd/containerd/api/events/content.proto new file mode 100644 index 000000000..aba50716f --- /dev/null +++ 
b/vendor/github.com/containerd/containerd/api/events/content.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package containerd.events; + +import weak "gogoproto/gogo.proto"; +import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; + +option go_package = "github.com/containerd/containerd/api/events;events"; +option (containerd.plugin.fieldpath_all) = true; + +message ContentDelete { + string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/containerd/containerd/api/events/doc.go b/vendor/github.com/containerd/containerd/api/events/doc.go new file mode 100644 index 000000000..354bef79f --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/doc.go @@ -0,0 +1,19 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package events has protobuf types for various events that are used in +// containerd. +package events diff --git a/vendor/github.com/containerd/containerd/api/events/image.pb.go b/vendor/github.com/containerd/containerd/api/events/image.pb.go new file mode 100644 index 000000000..8197005b6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/image.pb.go @@ -0,0 +1,949 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/image.proto + +package events + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
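// A minimal sketch (not part of the vendored files), assuming the surrounding events package:
// it round-trips the ContentDelete message vendored just above through its generated
// Marshal/Unmarshal methods. On the wire the message is a single length-delimited field:
// tag 0xa (field 1, wire type 2), a varint length, then the digest string bytes. The digest
// value and the function name are placeholders chosen for illustration.
func contentDeleteRoundTrip() (*ContentDelete, error) {
	in := &ContentDelete{
		Digest: github_com_opencontainers_go_digest.Digest("sha256:" + strings.Repeat("0", 64)),
	}
	wire, err := in.Marshal() // 0xa, <varint length>, digest bytes
	if err != nil {
		return nil, err
	}
	out := &ContentDelete{}
	if err := out.Unmarshal(wire); err != nil {
		return nil, err
	}
	return out, nil // out.Digest now equals in.Digest
}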
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ImageCreate struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *ImageCreate) Reset() { *m = ImageCreate{} } +func (*ImageCreate) ProtoMessage() {} +func (*ImageCreate) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{0} } + +type ImageUpdate struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *ImageUpdate) Reset() { *m = ImageUpdate{} } +func (*ImageUpdate) ProtoMessage() {} +func (*ImageUpdate) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{1} } + +type ImageDelete struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *ImageDelete) Reset() { *m = ImageDelete{} } +func (*ImageDelete) ProtoMessage() {} +func (*ImageDelete) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{2} } + +func init() { + proto.RegisterType((*ImageCreate)(nil), "containerd.services.images.v1.ImageCreate") + proto.RegisterType((*ImageUpdate)(nil), "containerd.services.images.v1.ImageUpdate") + proto.RegisterType((*ImageDelete)(nil), "containerd.services.images.v1.ImageDelete") +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ImageCreate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + case "labels": + // Labels fields have been special-cased by name. If this breaks, + // add better special casing to fieldpath plugin. + if len(m.Labels) == 0 { + return "", false + } + value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] + return value, ok + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ImageUpdate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + case "labels": + // Labels fields have been special-cased by name. If this breaks, + // add better special casing to fieldpath plugin. + if len(m.Labels) == 0 { + return "", false + } + value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] + return value, ok + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. 
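// A minimal sketch (not part of the vendored files) of how the generated Field methods above
// resolve a dotted fieldpath, e.g. "name" or "labels.<key>": for "labels", every element after
// the first is re-joined with "." and used as the map key. The image name, label key/value and
// the function name below are placeholder values.
func imageCreateFieldLookup() (string, bool) {
	ev := &ImageCreate{
		Name:   "docker.io/library/alpine:latest",
		Labels: map[string]string{"maintainer": "someone@example.com"},
	}
	if name, ok := ev.Field([]string{"name"}); !ok || name == "" {
		return "", false
	}
	// Looks up Labels["maintainer"] via the fieldpath "labels.maintainer".
	return ev.Field([]string{"labels", "maintainer"})
}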
+func (m *ImageDelete) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + } + return "", false +} +func (m *ImageCreate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageCreate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintImage(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v))) + i = encodeVarintImage(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintImage(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintImage(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *ImageUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageUpdate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintImage(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v))) + i = encodeVarintImage(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintImage(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintImage(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *ImageDelete) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageDelete) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintImage(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func encodeVarintImage(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ImageCreate) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovImage(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v))) + n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ImageUpdate) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovImage(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v))) + n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ImageDelete) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + 
sovImage(uint64(l)) + } + return n +} + +func sovImage(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozImage(x uint64) (n int) { + return sovImage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ImageCreate) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ImageCreate{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Labels:` + mapStringForLabels + `,`, + `}`, + }, "") + return s +} +func (this *ImageUpdate) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ImageUpdate{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Labels:` + mapStringForLabels + `,`, + `}`, + }, "") + return s +} +func (this *ImageDelete) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageDelete{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func valueToStringImage(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ImageCreate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageCreate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageCreate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthImage + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthImage + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthImage + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthImage + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipImage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthImage + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipImage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthImage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthImage + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthImage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthImage + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthImage + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipImage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthImage + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipImage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthImage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageDelete) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageDelete: wiretype end group for non-group") 
+ } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageDelete: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowImage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthImage + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipImage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthImage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipImage(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowImage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowImage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowImage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthImage + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowImage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipImage(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthImage = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowImage = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/events/image.proto", fileDescriptorImage) +} + +var fileDescriptorImage = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4f, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x67, 0xe6, + 0x26, 0xa6, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 
0xc9, 0x22, 0x94, 0xe8, 0x15, 0xa7, + 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, 0x81, 0x15, 0x14, 0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10, + 0x34, 0x17, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e, 0x41, 0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a, + 0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06, 0xc4, 0x02, 0xa5, 0x35, 0x8c, 0x5c, 0xdc, 0x9e, + 0x20, 0xf3, 0x9c, 0x8b, 0x52, 0x13, 0x4b, 0x52, 0x85, 0x84, 0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53, + 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0x21, 0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4, + 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x33, 0x3d, 0xbc, 0xae, 0xd2, 0x43, + 0x32, 0x4f, 0xcf, 0x07, 0xac, 0xd1, 0x35, 0xaf, 0xa4, 0xa8, 0x32, 0x08, 0x6a, 0x8a, 0x94, 0x25, + 0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0x25, 0xd4, 0x46, 0x10, 0x53, 0x48, + 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, 0x06, 0xe1, 0x58, 0x31, 0x59, + 0x30, 0x22, 0x9c, 0x1b, 0x5a, 0x90, 0x42, 0x55, 0xe7, 0x42, 0xcc, 0xa3, 0xb6, 0x73, 0x15, 0xa1, + 0xae, 0x75, 0x49, 0xcd, 0x49, 0xc5, 0xee, 0x5a, 0xa7, 0x80, 0x13, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, + 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, + 0x1e, 0xc9, 0x31, 0x2e, 0xf8, 0x22, 0xc7, 0x18, 0x65, 0x44, 0x42, 0xc2, 0xb1, 0x86, 0x50, 0x11, + 0x0c, 0x49, 0x6c, 0xe0, 0xb8, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x41, 0x80, 0x92, 0x17, + 0x77, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/events/image.proto b/vendor/github.com/containerd/containerd/api/events/image.proto new file mode 100644 index 000000000..470c3a2fa --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/image.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package containerd.services.images.v1; + +import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; + +option go_package = "github.com/containerd/containerd/api/events;events"; +option (containerd.plugin.fieldpath_all) = true; + +message ImageCreate { + string name = 1; + map<string, string> labels = 2; +} + +message ImageUpdate { + string name = 1; + map<string, string> labels = 2; +} + +message ImageDelete { + string name = 1; +} diff --git a/vendor/github.com/containerd/containerd/api/events/namespace.pb.go b/vendor/github.com/containerd/containerd/api/events/namespace.pb.go new file mode 100644 index 000000000..1c81f9fc4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/namespace.pb.go @@ -0,0 +1,950 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/namespace.proto + +package events + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type NamespaceCreate struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *NamespaceCreate) Reset() { *m = NamespaceCreate{} } +func (*NamespaceCreate) ProtoMessage() {} +func (*NamespaceCreate) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{0} } + +type NamespaceUpdate struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *NamespaceUpdate) Reset() { *m = NamespaceUpdate{} } +func (*NamespaceUpdate) ProtoMessage() {} +func (*NamespaceUpdate) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{1} } + +type NamespaceDelete struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *NamespaceDelete) Reset() { *m = NamespaceDelete{} } +func (*NamespaceDelete) ProtoMessage() {} +func (*NamespaceDelete) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{2} } + +func init() { + proto.RegisterType((*NamespaceCreate)(nil), "containerd.events.NamespaceCreate") + proto.RegisterType((*NamespaceUpdate)(nil), "containerd.events.NamespaceUpdate") + proto.RegisterType((*NamespaceDelete)(nil), "containerd.events.NamespaceDelete") +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *NamespaceCreate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + case "labels": + // Labels fields have been special-cased by name. If this breaks, + // add better special casing to fieldpath plugin. + if len(m.Labels) == 0 { + return "", false + } + value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] + return value, ok + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *NamespaceUpdate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + case "labels": + // Labels fields have been special-cased by name. If this breaks, + // add better special casing to fieldpath plugin. + if len(m.Labels) == 0 { + return "", false + } + value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] + return value, ok + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. 
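// A minimal sketch (not part of the vendored files): the init() above registers each namespace
// event under a fully-qualified name, which is what lets an event payload carried as a protobuf
// Any be resolved back to its Go type at runtime. It assumes the gogo/protobuf registry lookup
// proto.MessageType; proto and reflect are already imported by this file, and the function name
// is a placeholder.
func newNamespaceCreateByName() *NamespaceCreate {
	t := proto.MessageType("containerd.events.NamespaceCreate") // reflect.Type of *NamespaceCreate
	if t == nil {
		return nil
	}
	return reflect.New(t.Elem()).Interface().(*NamespaceCreate)
}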
+func (m *NamespaceDelete) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + } + return "", false +} +func (m *NamespaceCreate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamespaceCreate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v))) + i = encodeVarintNamespace(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintNamespace(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintNamespace(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *NamespaceUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamespaceUpdate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v))) + i = encodeVarintNamespace(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintNamespace(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintNamespace(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *NamespaceDelete) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamespaceDelete) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *NamespaceCreate) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovNamespace(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v))) + n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NamespaceUpdate) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovNamespace(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v))) + n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize)) + } + } + return 
n +} + +func (m *NamespaceDelete) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovNamespace(uint64(l)) + } + return n +} + +func sovNamespace(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozNamespace(x uint64) (n int) { + return sovNamespace(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *NamespaceCreate) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&NamespaceCreate{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Labels:` + mapStringForLabels + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceUpdate) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&NamespaceUpdate{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Labels:` + mapStringForLabels + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceDelete) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceDelete{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func valueToStringNamespace(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *NamespaceCreate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceCreate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceCreate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNamespace + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNamespace + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNamespace + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthNamespace + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipNamespace(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNamespace + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNamespace(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNamespace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNamespace + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNamespace + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNamespace + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthNamespace + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipNamespace(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNamespace + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNamespace(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNamespace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceDelete) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceDelete: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceDelete: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNamespace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNamespace + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNamespace(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNamespace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipNamespace(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNamespace + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNamespace + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNamespace + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthNamespace + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNamespace + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipNamespace(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowNamespace = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/events/namespace.proto", fileDescriptorNamespace) +} + +var fileDescriptorNamespace = []byte{ + // 296 bytes of a 
gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0xe7, 0x25, + 0xe6, 0xa6, 0x16, 0x17, 0x24, 0x26, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22, + 0x94, 0xe9, 0x41, 0x94, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x65, 0xf5, 0x41, 0x2c, 0x88, + 0x42, 0x29, 0x07, 0x82, 0xb6, 0x80, 0xd5, 0x25, 0x95, 0xa6, 0xe9, 0x17, 0xe4, 0x94, 0xa6, 0x67, + 0xe6, 0xe9, 0xa7, 0x65, 0xa6, 0xe6, 0xa4, 0x14, 0x24, 0x96, 0x64, 0x40, 0x4c, 0x50, 0x5a, 0xc1, + 0xc8, 0xc5, 0xef, 0x07, 0xb3, 0xde, 0xb9, 0x28, 0x35, 0xb1, 0x24, 0x55, 0x48, 0x88, 0x8b, 0x05, + 0xe4, 0x22, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0xc8, 0x8d, 0x8b, 0x2d, 0x27, + 0x31, 0x29, 0x35, 0xa7, 0x58, 0x82, 0x49, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x4f, 0x0f, 0xc3, 0x8d, + 0x7a, 0x68, 0xe6, 0xe8, 0xf9, 0x80, 0x35, 0xb8, 0xe6, 0x95, 0x14, 0x55, 0x06, 0x41, 0x75, 0x4b, + 0x59, 0x72, 0x71, 0x23, 0x09, 0x0b, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x6d, 0x02, 0x31, + 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x15, + 0x93, 0x05, 0x23, 0xaa, 0x53, 0x43, 0x0b, 0x52, 0xa8, 0xe2, 0x54, 0x88, 0x39, 0xd4, 0x76, 0xaa, + 0x2a, 0x92, 0x4b, 0x5d, 0x52, 0x73, 0x52, 0xb1, 0xbb, 0xd4, 0x29, 0xe0, 0xc4, 0x43, 0x39, 0x86, + 0x1b, 0x0f, 0xe5, 0x18, 0x1a, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, + 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x0b, 0xbe, 0xc8, 0x31, 0x46, 0x19, 0x91, 0x90, 0x84, 0xac, 0x21, + 0x54, 0x04, 0x43, 0x04, 0x63, 0x12, 0x1b, 0x38, 0x66, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x00, 0x50, 0x87, 0x59, 0x83, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/events/namespace.proto b/vendor/github.com/containerd/containerd/api/events/namespace.proto new file mode 100644 index 000000000..45deae79a --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/namespace.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package containerd.events; + +import weak "gogoproto/gogo.proto"; +import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; + +option go_package = "github.com/containerd/containerd/api/events;events"; +option (containerd.plugin.fieldpath_all) = true; + +message NamespaceCreate { + string name = 1; + map labels = 2; +} + +message NamespaceUpdate { + string name = 1; + map labels = 2; +} + +message NamespaceDelete { + string name = 1; +} diff --git a/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go b/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go new file mode 100644 index 000000000..e1f8f5c58 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go @@ -0,0 +1,704 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/snapshot.proto + +package events + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type SnapshotPrepare struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"` +} + +func (m *SnapshotPrepare) Reset() { *m = SnapshotPrepare{} } +func (*SnapshotPrepare) ProtoMessage() {} +func (*SnapshotPrepare) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{0} } + +type SnapshotCommit struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *SnapshotCommit) Reset() { *m = SnapshotCommit{} } +func (*SnapshotCommit) ProtoMessage() {} +func (*SnapshotCommit) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{1} } + +type SnapshotRemove struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (m *SnapshotRemove) Reset() { *m = SnapshotRemove{} } +func (*SnapshotRemove) ProtoMessage() {} +func (*SnapshotRemove) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2} } + +func init() { + proto.RegisterType((*SnapshotPrepare)(nil), "containerd.events.SnapshotPrepare") + proto.RegisterType((*SnapshotCommit)(nil), "containerd.events.SnapshotCommit") + proto.RegisterType((*SnapshotRemove)(nil), "containerd.events.SnapshotRemove") +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *SnapshotPrepare) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "key": + return string(m.Key), len(m.Key) > 0 + case "parent": + return string(m.Parent), len(m.Parent) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *SnapshotCommit) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "key": + return string(m.Key), len(m.Key) > 0 + case "name": + return string(m.Name), len(m.Name) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. 
+func (m *SnapshotRemove) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "key": + return string(m.Key), len(m.Key) > 0 + } + return "", false +} +func (m *SnapshotPrepare) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotPrepare) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Parent) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Parent))) + i += copy(dAtA[i:], m.Parent) + } + return i, nil +} + +func (m *SnapshotCommit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotCommit) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Name) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *SnapshotRemove) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotRemove) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + return i, nil +} + +func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *SnapshotPrepare) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovSnapshot(uint64(l)) + } + l = len(m.Parent) + if l > 0 { + n += 1 + l + sovSnapshot(uint64(l)) + } + return n +} + +func (m *SnapshotCommit) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovSnapshot(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovSnapshot(uint64(l)) + } + return n +} + +func (m *SnapshotRemove) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovSnapshot(uint64(l)) + } + return n +} + +func sovSnapshot(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozSnapshot(x uint64) (n int) { + return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SnapshotPrepare) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SnapshotPrepare{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Parent:` + fmt.Sprintf("%v", this.Parent) + `,`, + `}`, + }, "") + return s +} +func (this *SnapshotCommit) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SnapshotCommit{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *SnapshotRemove) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&SnapshotRemove{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `}`, + }, "") + return s +} +func valueToStringSnapshot(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SnapshotPrepare) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotPrepare: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotPrepare: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parent = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotCommit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotCommit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotCommit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotRemove) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotRemove: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotRemove: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSnapshot(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } 
+ } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthSnapshot + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSnapshot(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/events/snapshot.proto", fileDescriptorSnapshot) +} + +var fileDescriptorSnapshot = []byte{ + // 235 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4a, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x17, 0xe7, + 0x25, 0x16, 0x14, 0x67, 0xe4, 0x97, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22, 0x54, + 0xe9, 0x41, 0x54, 0x48, 0x39, 0x10, 0x34, 0x0e, 0xac, 0x35, 0xa9, 0x34, 0x4d, 0xbf, 0x20, 0xa7, + 0x34, 0x3d, 0x33, 0x4f, 0x3f, 0x2d, 0x33, 0x35, 0x27, 0xa5, 0x20, 0xb1, 0x24, 0x03, 0x62, 0xa8, + 0x92, 0x35, 0x17, 0x7f, 0x30, 0xd4, 0x9a, 0x80, 0xa2, 0xd4, 0x82, 0xc4, 0xa2, 0x54, 0x21, 0x01, + 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x10, 0x53, 0x48, 0x8c, + 0x8b, 0x0d, 0x24, 0x93, 0x57, 0x22, 0xc1, 0x04, 0x16, 0x84, 0xf2, 0x94, 0xcc, 0xb8, 0xf8, 0x60, + 0x9a, 0x9d, 0xf3, 0x73, 0x73, 0x33, 0x4b, 0xb0, 0xe8, 0x15, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, + 0x85, 0xea, 0x04, 0xb3, 0x95, 0x94, 0x10, 0xfa, 0x82, 0x52, 0x73, 0xf3, 0xcb, 0xb0, 0xd8, 0xe9, + 0x14, 0x70, 0xe2, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, + 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x05, 0x5f, 0xe4, 0x18, 0xa3, + 0x8c, 0x48, 0x08, 0x47, 0x6b, 0x08, 0x15, 0xc1, 0x90, 0xc4, 0x06, 0xf6, 0xb3, 0x31, 0x20, 0x00, + 0x00, 0xff, 0xff, 0x69, 0x66, 0xa9, 0x2a, 0x86, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/events/snapshot.proto b/vendor/github.com/containerd/containerd/api/events/snapshot.proto new file mode 100644 index 000000000..425eeec8e --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/snapshot.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package containerd.events; + +import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; + +option go_package = "github.com/containerd/containerd/api/events;events"; +option (containerd.plugin.fieldpath_all) = true; + +message 
SnapshotPrepare { + string key = 1; + string parent = 2; +} + +message SnapshotCommit { + string key = 1; + string name = 2; +} + +message SnapshotRemove { + string key = 1; +} diff --git a/vendor/github.com/containerd/containerd/api/events/task.pb.go b/vendor/github.com/containerd/containerd/api/events/task.pb.go new file mode 100644 index 000000000..b0a6c2c24 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/task.pb.go @@ -0,0 +1,2610 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/task.proto + +package events + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +import _ "github.com/gogo/protobuf/types" +import containerd_types "github.com/containerd/containerd/api/types" + +// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin" + +import time "time" + +import types "github.com/gogo/protobuf/types" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +type TaskCreate struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` + Rootfs []*containerd_types.Mount `protobuf:"bytes,3,rep,name=rootfs" json:"rootfs,omitempty"` + IO *TaskIO `protobuf:"bytes,4,opt,name=io" json:"io,omitempty"` + Checkpoint string `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` + Pid uint32 `protobuf:"varint,6,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (m *TaskCreate) Reset() { *m = TaskCreate{} } +func (*TaskCreate) ProtoMessage() {} +func (*TaskCreate) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} } + +type TaskStart struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (m *TaskStart) Reset() { *m = TaskStart{} } +func (*TaskStart) ProtoMessage() {} +func (*TaskStart) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{1} } + +type TaskDelete struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + ExitStatus uint32 `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt time.Time `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` +} + +func (m *TaskDelete) Reset() { *m = TaskDelete{} } +func (*TaskDelete) ProtoMessage() {} +func (*TaskDelete) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{2} } + +type TaskIO struct { + Stdin string `protobuf:"bytes,1,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,2,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,3,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` +} + +func (m *TaskIO) Reset() { *m = TaskIO{} } +func (*TaskIO) ProtoMessage() {} +func (*TaskIO) Descriptor() ([]byte, []int) { return 
fileDescriptorTask, []int{3} } + +type TaskExit struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + ExitStatus uint32 `protobuf:"varint,4,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt time.Time `protobuf:"bytes,5,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` +} + +func (m *TaskExit) Reset() { *m = TaskExit{} } +func (*TaskExit) ProtoMessage() {} +func (*TaskExit) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{4} } + +type TaskOOM struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (m *TaskOOM) Reset() { *m = TaskOOM{} } +func (*TaskOOM) ProtoMessage() {} +func (*TaskOOM) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{5} } + +type TaskExecAdded struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (m *TaskExecAdded) Reset() { *m = TaskExecAdded{} } +func (*TaskExecAdded) ProtoMessage() {} +func (*TaskExecAdded) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{6} } + +type TaskExecStarted struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (m *TaskExecStarted) Reset() { *m = TaskExecStarted{} } +func (*TaskExecStarted) ProtoMessage() {} +func (*TaskExecStarted) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{7} } + +type TaskPaused struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (m *TaskPaused) Reset() { *m = TaskPaused{} } +func (*TaskPaused) ProtoMessage() {} +func (*TaskPaused) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{8} } + +type TaskResumed struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (m *TaskResumed) Reset() { *m = TaskResumed{} } +func (*TaskResumed) ProtoMessage() {} +func (*TaskResumed) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{9} } + +type TaskCheckpointed struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Checkpoint string `protobuf:"bytes,2,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` +} + +func (m *TaskCheckpointed) Reset() { *m = TaskCheckpointed{} } +func (*TaskCheckpointed) ProtoMessage() {} +func (*TaskCheckpointed) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{10} } + +func init() { + proto.RegisterType((*TaskCreate)(nil), "containerd.events.TaskCreate") + proto.RegisterType((*TaskStart)(nil), "containerd.events.TaskStart") + proto.RegisterType((*TaskDelete)(nil), "containerd.events.TaskDelete") + proto.RegisterType((*TaskIO)(nil), "containerd.events.TaskIO") + proto.RegisterType((*TaskExit)(nil), "containerd.events.TaskExit") + proto.RegisterType((*TaskOOM)(nil), 
"containerd.events.TaskOOM") + proto.RegisterType((*TaskExecAdded)(nil), "containerd.events.TaskExecAdded") + proto.RegisterType((*TaskExecStarted)(nil), "containerd.events.TaskExecStarted") + proto.RegisterType((*TaskPaused)(nil), "containerd.events.TaskPaused") + proto.RegisterType((*TaskResumed)(nil), "containerd.events.TaskResumed") + proto.RegisterType((*TaskCheckpointed)(nil), "containerd.events.TaskCheckpointed") +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskCreate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + // unhandled: rootfs + // unhandled: pid + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + case "bundle": + return string(m.Bundle), len(m.Bundle) > 0 + case "io": + // NOTE(stevvooe): This is probably not correct in many cases. + // We assume that the target message also implements the Field + // method, which isn't likely true in a lot of cases. + // + // If you have a broken build and have found this comment, + // you may be closer to a solution. + if m.IO == nil { + return "", false + } + + return m.IO.Field(fieldpath[1:]) + case "checkpoint": + return string(m.Checkpoint), len(m.Checkpoint) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskStart) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + // unhandled: pid + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskDelete) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + // unhandled: pid + // unhandled: exit_status + // unhandled: exited_at + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskIO) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "stdin": + return string(m.Stdin), len(m.Stdin) > 0 + case "stdout": + return string(m.Stdout), len(m.Stdout) > 0 + case "stderr": + return string(m.Stderr), len(m.Stderr) > 0 + case "terminal": + return fmt.Sprint(m.Terminal), true + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskExit) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + // unhandled: pid + // unhandled: exit_status + // unhandled: exited_at + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + case "id": + return string(m.ID), len(m.ID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. 
+func (m *TaskOOM) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskExecAdded) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + case "exec_id": + return string(m.ExecID), len(m.ExecID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskExecStarted) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + // unhandled: pid + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + case "exec_id": + return string(m.ExecID), len(m.ExecID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskPaused) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *TaskResumed) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. 
+func (m *TaskCheckpointed) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "container_id": + return string(m.ContainerID), len(m.ContainerID) > 0 + case "checkpoint": + return string(m.Checkpoint), len(m.Checkpoint) > 0 + } + return "", false +} +func (m *TaskCreate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskCreate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.Bundle) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.Bundle))) + i += copy(dAtA[i:], m.Bundle) + } + if len(m.Rootfs) > 0 { + for _, msg := range m.Rootfs { + dAtA[i] = 0x1a + i++ + i = encodeVarintTask(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.IO != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintTask(dAtA, i, uint64(m.IO.Size())) + n1, err := m.IO.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Checkpoint) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.Checkpoint))) + i += copy(dAtA[i:], m.Checkpoint) + } + if m.Pid != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintTask(dAtA, i, uint64(m.Pid)) + } + return i, nil +} + +func (m *TaskStart) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskStart) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if m.Pid != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTask(dAtA, i, uint64(m.Pid)) + } + return i, nil +} + +func (m *TaskDelete) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskDelete) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if m.Pid != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTask(dAtA, i, uint64(m.Pid)) + } + if m.ExitStatus != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus)) + } + dAtA[i] = 0x22 + i++ + i = encodeVarintTask(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt))) + n2, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *TaskIO) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskIO) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Stdin) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.Stdin))) + i += copy(dAtA[i:], m.Stdin) + } + if len(m.Stdout) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTask(dAtA, i, 
uint64(len(m.Stdout))) + i += copy(dAtA[i:], m.Stdout) + } + if len(m.Stderr) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.Stderr))) + i += copy(dAtA[i:], m.Stderr) + } + if m.Terminal { + dAtA[i] = 0x20 + i++ + if m.Terminal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *TaskExit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskExit) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.ID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Pid != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTask(dAtA, i, uint64(m.Pid)) + } + if m.ExitStatus != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus)) + } + dAtA[i] = 0x2a + i++ + i = encodeVarintTask(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt))) + n3, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *TaskOOM) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskOOM) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + return i, nil +} + +func (m *TaskExecAdded) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskExecAdded) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + return i, nil +} + +func (m *TaskExecStarted) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskExecStarted) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + if m.Pid != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTask(dAtA, i, uint64(m.Pid)) + } + return i, nil +} + +func (m *TaskPaused) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskPaused) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTask(dAtA, i, 
uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + return i, nil +} + +func (m *TaskResumed) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskResumed) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + return i, nil +} + +func (m *TaskCheckpointed) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskCheckpointed) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.Checkpoint) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.Checkpoint))) + i += copy(dAtA[i:], m.Checkpoint) + } + return i, nil +} + +func encodeVarintTask(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *TaskCreate) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.Bundle) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if len(m.Rootfs) > 0 { + for _, e := range m.Rootfs { + l = e.Size() + n += 1 + l + sovTask(uint64(l)) + } + } + if m.IO != nil { + l = m.IO.Size() + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.Checkpoint) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if m.Pid != 0 { + n += 1 + sovTask(uint64(m.Pid)) + } + return n +} + +func (m *TaskStart) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if m.Pid != 0 { + n += 1 + sovTask(uint64(m.Pid)) + } + return n +} + +func (m *TaskDelete) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if m.Pid != 0 { + n += 1 + sovTask(uint64(m.Pid)) + } + if m.ExitStatus != 0 { + n += 1 + sovTask(uint64(m.ExitStatus)) + } + l = types.SizeOfStdTime(m.ExitedAt) + n += 1 + l + sovTask(uint64(l)) + return n +} + +func (m *TaskIO) Size() (n int) { + var l int + _ = l + l = len(m.Stdin) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.Stdout) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.Stderr) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if m.Terminal { + n += 2 + } + return n +} + +func (m *TaskExit) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.ID) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if m.Pid != 0 { + n += 1 + sovTask(uint64(m.Pid)) + } + if m.ExitStatus != 0 { + n += 1 + sovTask(uint64(m.ExitStatus)) + } + l = types.SizeOfStdTime(m.ExitedAt) + n += 1 + l + sovTask(uint64(l)) + return n +} + +func (m *TaskOOM) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + return n +} + +func (m *TaskExecAdded) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 
1 + l + sovTask(uint64(l)) + } + return n +} + +func (m *TaskExecStarted) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if m.Pid != 0 { + n += 1 + sovTask(uint64(m.Pid)) + } + return n +} + +func (m *TaskPaused) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + return n +} + +func (m *TaskResumed) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + return n +} + +func (m *TaskCheckpointed) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.Checkpoint) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + return n +} + +func sovTask(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTask(x uint64) (n int) { + return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TaskCreate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskCreate{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`, + `Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "containerd_types.Mount", 1) + `,`, + `IO:` + strings.Replace(fmt.Sprintf("%v", this.IO), "TaskIO", "TaskIO", 1) + `,`, + `Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `}`, + }, "") + return s +} +func (this *TaskStart) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskStart{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `}`, + }, "") + return s +} +func (this *TaskDelete) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskDelete{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskIO) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskIO{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, + `}`, + }, "") + return s +} +func (this *TaskExit) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskExit{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskOOM) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskOOM{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *TaskExecAdded) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&TaskExecAdded{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `}`, + }, "") + return s +} +func (this *TaskExecStarted) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskExecStarted{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `}`, + }, "") + return s +} +func (this *TaskPaused) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskPaused{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *TaskResumed) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskResumed{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *TaskCheckpointed) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskCheckpointed{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`, + `}`, + }, "") + return s +} +func valueToStringTask(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *TaskCreate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskCreate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskCreate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bundle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rootfs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rootfs = append(m.Rootfs, &containerd_types.Mount{}) + if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IO", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IO == nil { + m.IO = &TaskIO{} + } + if err := m.IO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Checkpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskStart) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskStart: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskStart: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskDelete) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskDelete: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskDelete: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + m.ExitStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitStatus |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: 
+ iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskIO) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskIO: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskIO: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdout = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stderr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Terminal = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskExit) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskExit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskExit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + m.ExitStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitStatus |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskOOM) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskOOM: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskOOM: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskExecAdded) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskExecAdded: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskExecAdded: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskExecStarted) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskExecStarted: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskExecStarted: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskPaused) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskPaused: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: TaskPaused: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskResumed) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskResumed: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskResumed: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskCheckpointed) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskCheckpointed: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskCheckpointed: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Checkpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTask(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTask + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTask(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTask = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/events/task.proto", fileDescriptorTask) +} + +var fileDescriptorTask = []byte{ + // 637 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 
0xff, 0xb4, 0x95, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xc7, 0x63, 0xa7, 0x75, 0x93, 0x09, 0x55, 0x8b, 0x55, 0x41, 0xc8, 0xc1, 0x8e, 0xcc, 0x25, + 0x27, 0x5b, 0x04, 0x89, 0x0b, 0x42, 0x6a, 0xd2, 0x70, 0xc8, 0xa1, 0x4a, 0x71, 0x7b, 0xa8, 0xb8, + 0x44, 0x4e, 0x76, 0x93, 0x2c, 0x8d, 0xbd, 0x96, 0x3d, 0x46, 0x45, 0xe2, 0xc0, 0x23, 0xf0, 0x08, + 0x3c, 0x05, 0xcf, 0xd0, 0x03, 0x07, 0x8e, 0x9c, 0x02, 0xf5, 0x03, 0x70, 0xe2, 0x01, 0xd0, 0x7a, + 0x1d, 0xb7, 0x50, 0xf1, 0x65, 0x89, 0x53, 0x76, 0x66, 0x67, 0xff, 0x33, 0xf3, 0xdb, 0xc9, 0x1a, + 0x1e, 0xcd, 0x19, 0x2e, 0x92, 0x89, 0x3d, 0xe5, 0xbe, 0x33, 0xe5, 0x01, 0x7a, 0x2c, 0xa0, 0x11, + 0xb9, 0xbe, 0xf4, 0x42, 0xe6, 0xd0, 0x97, 0x34, 0xc0, 0xd8, 0x41, 0x2f, 0x3e, 0xb3, 0xc3, 0x88, + 0x23, 0xd7, 0x6f, 0x5f, 0x45, 0xd8, 0x72, 0xb7, 0xb5, 0x37, 0xe7, 0x73, 0x9e, 0xed, 0x3a, 0x62, + 0x25, 0x03, 0x5b, 0xe6, 0x9c, 0xf3, 0xf9, 0x92, 0x3a, 0x99, 0x35, 0x49, 0x66, 0x0e, 0x32, 0x9f, + 0xc6, 0xe8, 0xf9, 0x61, 0x1e, 0xf0, 0x77, 0x15, 0xe0, 0xab, 0x90, 0xc6, 0x8e, 0xcf, 0x93, 0x00, + 0xf3, 0x73, 0xfb, 0x7f, 0x3c, 0x57, 0xa4, 0x0c, 0x97, 0xc9, 0x9c, 0x05, 0xce, 0x8c, 0xd1, 0x25, + 0x09, 0x3d, 0x5c, 0x48, 0x05, 0xeb, 0xab, 0x02, 0x70, 0xe2, 0xc5, 0x67, 0x07, 0x11, 0xf5, 0x90, + 0xea, 0x5d, 0xb8, 0x55, 0x1c, 0x1e, 0x33, 0xd2, 0x54, 0xda, 0x4a, 0xa7, 0xde, 0xdf, 0x49, 0x57, + 0x66, 0xe3, 0x60, 0xed, 0x1f, 0x0e, 0xdc, 0x46, 0x11, 0x34, 0x24, 0xfa, 0x1d, 0xd0, 0x26, 0x49, + 0x40, 0x96, 0xb4, 0xa9, 0x8a, 0x68, 0x37, 0xb7, 0x74, 0x07, 0xb4, 0x88, 0x73, 0x9c, 0xc5, 0xcd, + 0x6a, 0xbb, 0xda, 0x69, 0x74, 0xef, 0xda, 0xd7, 0x78, 0x65, 0xbd, 0xd8, 0x87, 0xa2, 0x17, 0x37, + 0x0f, 0xd3, 0x1f, 0x80, 0xca, 0x78, 0x73, 0xa3, 0xad, 0x74, 0x1a, 0xdd, 0x7b, 0xf6, 0x0d, 0xb8, + 0xb6, 0xa8, 0x73, 0x38, 0xea, 0x6b, 0xe9, 0xca, 0x54, 0x87, 0x23, 0x57, 0x65, 0x5c, 0x37, 0x00, + 0xa6, 0x0b, 0x3a, 0x3d, 0x0b, 0x39, 0x0b, 0xb0, 0xb9, 0x99, 0xe5, 0xbf, 0xe6, 0xd1, 0x77, 0xa1, + 0x1a, 0x32, 0xd2, 0xd4, 0xda, 0x4a, 0x67, 0xdb, 0x15, 0x4b, 0xeb, 0x19, 0xd4, 0x85, 0xce, 0x31, + 0x7a, 0x11, 0x96, 0x6a, 0x37, 0x97, 0x54, 0xaf, 0x24, 0xdf, 0xe7, 0x0c, 0x07, 0x74, 0x49, 0x4b, + 0x32, 0xbc, 0x21, 0xaa, 0x9b, 0xd0, 0xa0, 0xe7, 0x0c, 0xc7, 0x31, 0x7a, 0x98, 0x08, 0x84, 0x62, + 0x07, 0x84, 0xeb, 0x38, 0xf3, 0xe8, 0x3d, 0xa8, 0x0b, 0x8b, 0x92, 0xb1, 0x87, 0x39, 0xb4, 0x96, + 0x2d, 0x07, 0xcd, 0x5e, 0xdf, 0xba, 0x7d, 0xb2, 0x1e, 0xb4, 0x7e, 0xed, 0x62, 0x65, 0x56, 0xde, + 0x7e, 0x36, 0x15, 0xb7, 0x26, 0x8f, 0xf5, 0xd0, 0x7a, 0x01, 0x9a, 0x64, 0xaa, 0xef, 0xc1, 0x66, + 0x8c, 0x84, 0x05, 0xb2, 0x58, 0x57, 0x1a, 0xe2, 0x66, 0x63, 0x24, 0x3c, 0xc1, 0xf5, 0xcd, 0x4a, + 0x2b, 0xf7, 0xd3, 0x28, 0xca, 0xca, 0x92, 0x7e, 0x1a, 0x45, 0x7a, 0x0b, 0x6a, 0x48, 0x23, 0x9f, + 0x05, 0xde, 0x32, 0xab, 0xa8, 0xe6, 0x16, 0xb6, 0xf5, 0x41, 0x81, 0x9a, 0x48, 0xf6, 0xf4, 0x9c, + 0x61, 0xc9, 0x31, 0x53, 0x73, 0x42, 0xf5, 0x7c, 0x04, 0x06, 0xae, 0xca, 0x0a, 0x74, 0xd5, 0x5f, + 0xa2, 0xdb, 0xf8, 0x3d, 0xba, 0xcd, 0x52, 0xe8, 0x9e, 0xc0, 0x96, 0xe8, 0x66, 0x34, 0x3a, 0x2c, + 0xd3, 0x8c, 0xb5, 0x80, 0x6d, 0x09, 0x83, 0x4e, 0x7b, 0x84, 0x50, 0x52, 0x8a, 0xc8, 0x7d, 0xd8, + 0xa2, 0xe7, 0x74, 0x3a, 0x2e, 0xb0, 0x40, 0xba, 0x32, 0x35, 0xa1, 0x39, 0x1c, 0xb8, 0x9a, 0xd8, + 0x1a, 0x12, 0xeb, 0x35, 0xec, 0xac, 0x33, 0x65, 0x33, 0xff, 0x1f, 0x73, 0xdd, 0xbc, 0x0a, 0x6b, + 0x5f, 0xfe, 0x33, 0x8e, 0xbc, 0x24, 0x2e, 0x97, 0xd8, 0xea, 0x41, 0x43, 0x28, 0xb8, 0x34, 0x4e, + 0xfc, 0x92, 0x12, 0x33, 0xd8, 0xcd, 0x9e, 0xb8, 0xe2, 0x59, 0x28, 0xc9, 0xe0, 0xc7, 0xc7, 0x46, + 0xfd, 0xf9, 0xb1, 0xe9, 0x1f, 0x5d, 0x5c, 0x1a, 0x95, 0x4f, 0x97, 0x46, 0xe5, 
0x4d, 0x6a, 0x28, + 0x17, 0xa9, 0xa1, 0x7c, 0x4c, 0x0d, 0xe5, 0x4b, 0x6a, 0x28, 0xef, 0xbe, 0x19, 0xca, 0xf3, 0xee, + 0x3f, 0x7c, 0x65, 0x1e, 0xcb, 0x9f, 0xd3, 0xca, 0x69, 0x75, 0xa2, 0x65, 0x13, 0xf9, 0xf0, 0x7b, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x07, 0x69, 0x62, 0x9d, 0xa6, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/events/task.proto b/vendor/github.com/containerd/containerd/api/events/task.proto new file mode 100644 index 000000000..d69921365 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/task.proto @@ -0,0 +1,75 @@ +syntax = "proto3"; + +package containerd.events; + +import weak "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "github.com/containerd/containerd/api/types/mount.proto"; +import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; + +option go_package = "github.com/containerd/containerd/api/events;events"; +option (containerd.plugin.fieldpath_all) = true; + +message TaskCreate { + string container_id = 1; + string bundle = 2; + repeated containerd.types.Mount rootfs = 3; + TaskIO io = 4 [(gogoproto.customname) = "IO"]; + string checkpoint = 5; + uint32 pid = 6; +} + +message TaskStart { + string container_id = 1; + uint32 pid = 2; +} + +message TaskDelete { + string container_id = 1; + uint32 pid = 2; + uint32 exit_status = 3; + google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message TaskIO { + string stdin = 1; + string stdout = 2; + string stderr = 3; + bool terminal = 4; +} + +message TaskExit { + string container_id = 1; + string id = 2; + uint32 pid = 3; + uint32 exit_status = 4; + google.protobuf.Timestamp exited_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message TaskOOM { + string container_id = 1; +} + +message TaskExecAdded { + string container_id = 1; + string exec_id = 2; +} + +message TaskExecStarted { + string container_id = 1; + string exec_id = 2; + uint32 pid = 3; +} + +message TaskPaused { + string container_id = 1; +} + +message TaskResumed { + string container_id = 1; +} + +message TaskCheckpointed { + string container_id = 1; + string checkpoint = 2; +} diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go new file mode 100644 index 000000000..ec08c3b23 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go @@ -0,0 +1,4447 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/api/services/content/v1/content.proto + +/* + Package content is a generated protocol buffer package. 
+ + It is generated from these files: + github.com/containerd/containerd/api/services/content/v1/content.proto + + It has these top-level messages: + Info + InfoRequest + InfoResponse + UpdateRequest + UpdateResponse + ListContentRequest + ListContentResponse + DeleteContentRequest + ReadContentRequest + ReadContentResponse + Status + StatusRequest + StatusResponse + ListStatusesRequest + ListStatusesResponse + WriteContentRequest + WriteContentResponse + AbortRequest +*/ +package content + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +import google_protobuf1 "github.com/gogo/protobuf/types" +import _ "github.com/gogo/protobuf/types" +import google_protobuf3 "github.com/gogo/protobuf/types" + +import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" +import time "time" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import types "github.com/gogo/protobuf/types" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// WriteAction defines the behavior of a WriteRequest. +type WriteAction int32 + +const ( + // WriteActionStat instructs the writer to return the current status while + // holding the lock on the write. + WriteActionStat WriteAction = 0 + // WriteActionWrite sets the action for the write request to write data. + // + // Any data included will be written at the provided offset. The + // transaction will be left open for further writes. + // + // This is the default. + WriteActionWrite WriteAction = 1 + // WriteActionCommit will write any outstanding data in the message and + // commit the write, storing it under the digest. + // + // This can be used in a single message to send the data, verify it and + // commit it. + // + // This action will always terminate the write. + WriteActionCommit WriteAction = 2 +) + +var WriteAction_name = map[int32]string{ + 0: "STAT", + 1: "WRITE", + 2: "COMMIT", +} +var WriteAction_value = map[string]int32{ + "STAT": 0, + "WRITE": 1, + "COMMIT": 2, +} + +func (x WriteAction) String() string { + return proto.EnumName(WriteAction_name, int32(x)) +} +func (WriteAction) EnumDescriptor() ([]byte, []int) { return fileDescriptorContent, []int{0} } + +type Info struct { + // Digest is the hash identity of the blob. + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + // Size is the total number of bytes in the blob. + Size_ int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` + // CreatedAt provides the time at which the blob was committed. + CreatedAt time.Time `protobuf:"bytes,3,opt,name=created_at,json=createdAt,stdtime" json:"created_at"` + // UpdatedAt provides the time the info was last updated. 
+ UpdatedAt time.Time `protobuf:"bytes,4,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"` + // Labels are arbitrary data on snapshots. + // + // The combined size of a key/value pair cannot exceed 4096 bytes. + Labels map[string]string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *Info) Reset() { *m = Info{} } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{0} } + +type InfoRequest struct { + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` +} + +func (m *InfoRequest) Reset() { *m = InfoRequest{} } +func (*InfoRequest) ProtoMessage() {} +func (*InfoRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{1} } + +type InfoResponse struct { + Info Info `protobuf:"bytes,1,opt,name=info" json:"info"` +} + +func (m *InfoResponse) Reset() { *m = InfoResponse{} } +func (*InfoResponse) ProtoMessage() {} +func (*InfoResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{2} } + +type UpdateRequest struct { + Info Info `protobuf:"bytes,1,opt,name=info" json:"info"` + // UpdateMask specifies which fields to perform the update on. If empty, + // the operation applies to all fields. + // + // In info, Digest, Size, and CreatedAt are immutable, + // other field may be updated using this mask. + // If no mask is provided, all mutable field are updated. + UpdateMask *google_protobuf1.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateRequest) Reset() { *m = UpdateRequest{} } +func (*UpdateRequest) ProtoMessage() {} +func (*UpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{3} } + +type UpdateResponse struct { + Info Info `protobuf:"bytes,1,opt,name=info" json:"info"` +} + +func (m *UpdateResponse) Reset() { *m = UpdateResponse{} } +func (*UpdateResponse) ProtoMessage() {} +func (*UpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{4} } + +type ListContentRequest struct { + // Filters contains one or more filters using the syntax defined in the + // containerd filter package. + // + // The returned result will be those that match any of the provided + // filters. Expanded, containers that match the following will be + // returned: + // + // filters[0] or filters[1] or ... or filters[n-1] or filters[n] + // + // If filters is zero-length or nil, all items will be returned. + Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"` +} + +func (m *ListContentRequest) Reset() { *m = ListContentRequest{} } +func (*ListContentRequest) ProtoMessage() {} +func (*ListContentRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{5} } + +type ListContentResponse struct { + Info []Info `protobuf:"bytes,1,rep,name=info" json:"info"` +} + +func (m *ListContentResponse) Reset() { *m = ListContentResponse{} } +func (*ListContentResponse) ProtoMessage() {} +func (*ListContentResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{6} } + +type DeleteContentRequest struct { + // Digest specifies which content to delete. 
+ Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` +} + +func (m *DeleteContentRequest) Reset() { *m = DeleteContentRequest{} } +func (*DeleteContentRequest) ProtoMessage() {} +func (*DeleteContentRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{7} } + +// ReadContentRequest defines the fields that make up a request to read a portion of +// data from a stored object. +type ReadContentRequest struct { + // Digest is the hash identity to read. + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + // Offset specifies the number of bytes from the start at which to begin + // the read. If zero or less, the read will be from the start. This uses + // standard zero-indexed semantics. + Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` + // size is the total size of the read. If zero, the entire blob will be + // returned by the service. + Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` +} + +func (m *ReadContentRequest) Reset() { *m = ReadContentRequest{} } +func (*ReadContentRequest) ProtoMessage() {} +func (*ReadContentRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{8} } + +// ReadContentResponse carries byte data for a read request. +type ReadContentResponse struct { + Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *ReadContentResponse) Reset() { *m = ReadContentResponse{} } +func (*ReadContentResponse) ProtoMessage() {} +func (*ReadContentResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{9} } + +type Status struct { + StartedAt time.Time `protobuf:"bytes,1,opt,name=started_at,json=startedAt,stdtime" json:"started_at"` + UpdatedAt time.Time `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"` + Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` + Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` + Expected github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"` +} + +func (m *Status) Reset() { *m = Status{} } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{10} } + +type StatusRequest struct { + Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` +} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{11} } + +type StatusResponse struct { + Status *Status `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{12} } + +type ListStatusesRequest struct { + Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"` +} + +func (m *ListStatusesRequest) Reset() { *m = ListStatusesRequest{} } +func 
(*ListStatusesRequest) ProtoMessage() {} +func (*ListStatusesRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{13} } + +type ListStatusesResponse struct { + Statuses []Status `protobuf:"bytes,1,rep,name=statuses" json:"statuses"` +} + +func (m *ListStatusesResponse) Reset() { *m = ListStatusesResponse{} } +func (*ListStatusesResponse) ProtoMessage() {} +func (*ListStatusesResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{14} } + +// WriteContentRequest writes data to the request ref at offset. +type WriteContentRequest struct { + // Action sets the behavior of the write. + // + // When this is a write and the ref is not yet allocated, the ref will be + // allocated and the data will be written at offset. + // + // If the action is write and the ref is allocated, it will accept data to + // an offset that has not yet been written. + // + // If the action is write and there is no data, the current write status + // will be returned. This works differently from status because the stream + // holds a lock. + Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.services.content.v1.WriteAction" json:"action,omitempty"` + // Ref identifies the pre-commit object to write to. + Ref string `protobuf:"bytes,2,opt,name=ref,proto3" json:"ref,omitempty"` + // Total can be set to have the service validate the total size of the + // committed content. + // + // The latest value before or with the commit action message will be use to + // validate the content. If the offset overflows total, the service may + // report an error. It is only required on one message for the write. + // + // If the value is zero or less, no validation of the final content will be + // performed. + Total int64 `protobuf:"varint,3,opt,name=total,proto3" json:"total,omitempty"` + // Expected can be set to have the service validate the final content against + // the provided digest. + // + // If the digest is already present in the object store, an AlreadyExists + // error will be returned. + // + // Only the latest version will be used to check the content against the + // digest. It is only required to include it on a single message, before or + // with the commit action message. + Expected github_com_opencontainers_go_digest.Digest `protobuf:"bytes,4,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"` + // Offset specifies the number of bytes from the start at which to begin + // the write. For most implementations, this means from the start of the + // file. This uses standard, zero-indexed semantics. + // + // If the action is write, the remote may remove all previously written + // data after the offset. Implementations may support arbitrary offsets but + // MUST support reseting this value to zero with a write. If an + // implementation does not support a write at a particular offset, an + // OutOfRange error must be returned. + Offset int64 `protobuf:"varint,5,opt,name=offset,proto3" json:"offset,omitempty"` + // Data is the actual bytes to be written. + // + // If this is empty and the message is not a commit, a response will be + // returned with the current write state. + Data []byte `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"` + // Labels are arbitrary data on snapshots. + // + // The combined size of a key/value pair cannot exceed 4096 bytes. 
+ Labels map[string]string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *WriteContentRequest) Reset() { *m = WriteContentRequest{} } +func (*WriteContentRequest) ProtoMessage() {} +func (*WriteContentRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{15} } + +// WriteContentResponse is returned on the culmination of a write call. +type WriteContentResponse struct { + // Action contains the action for the final message of the stream. A writer + // should confirm that they match the intended result. + Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.services.content.v1.WriteAction" json:"action,omitempty"` + // StartedAt provides the time at which the write began. + // + // This must be set for stat and commit write actions. All other write + // actions may omit this. + StartedAt time.Time `protobuf:"bytes,2,opt,name=started_at,json=startedAt,stdtime" json:"started_at"` + // UpdatedAt provides the last time of a successful write. + // + // This must be set for stat and commit write actions. All other write + // actions may omit this. + UpdatedAt time.Time `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"` + // Offset is the current committed size for the write. + Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + // Total provides the current, expected total size of the write. + // + // We include this to provide consistency with the Status structure on the + // client writer. + // + // This is only valid on the Stat and Commit response. + Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` + // Digest, if present, includes the digest up to the currently committed + // bytes. If action is commit, this field will be set. It is implementation + // defined if this is set for other actions. 
+ Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` +} + +func (m *WriteContentResponse) Reset() { *m = WriteContentResponse{} } +func (*WriteContentResponse) ProtoMessage() {} +func (*WriteContentResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{16} } + +type AbortRequest struct { + Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` +} + +func (m *AbortRequest) Reset() { *m = AbortRequest{} } +func (*AbortRequest) ProtoMessage() {} +func (*AbortRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{17} } + +func init() { + proto.RegisterType((*Info)(nil), "containerd.services.content.v1.Info") + proto.RegisterType((*InfoRequest)(nil), "containerd.services.content.v1.InfoRequest") + proto.RegisterType((*InfoResponse)(nil), "containerd.services.content.v1.InfoResponse") + proto.RegisterType((*UpdateRequest)(nil), "containerd.services.content.v1.UpdateRequest") + proto.RegisterType((*UpdateResponse)(nil), "containerd.services.content.v1.UpdateResponse") + proto.RegisterType((*ListContentRequest)(nil), "containerd.services.content.v1.ListContentRequest") + proto.RegisterType((*ListContentResponse)(nil), "containerd.services.content.v1.ListContentResponse") + proto.RegisterType((*DeleteContentRequest)(nil), "containerd.services.content.v1.DeleteContentRequest") + proto.RegisterType((*ReadContentRequest)(nil), "containerd.services.content.v1.ReadContentRequest") + proto.RegisterType((*ReadContentResponse)(nil), "containerd.services.content.v1.ReadContentResponse") + proto.RegisterType((*Status)(nil), "containerd.services.content.v1.Status") + proto.RegisterType((*StatusRequest)(nil), "containerd.services.content.v1.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "containerd.services.content.v1.StatusResponse") + proto.RegisterType((*ListStatusesRequest)(nil), "containerd.services.content.v1.ListStatusesRequest") + proto.RegisterType((*ListStatusesResponse)(nil), "containerd.services.content.v1.ListStatusesResponse") + proto.RegisterType((*WriteContentRequest)(nil), "containerd.services.content.v1.WriteContentRequest") + proto.RegisterType((*WriteContentResponse)(nil), "containerd.services.content.v1.WriteContentResponse") + proto.RegisterType((*AbortRequest)(nil), "containerd.services.content.v1.AbortRequest") + proto.RegisterEnum("containerd.services.content.v1.WriteAction", WriteAction_name, WriteAction_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Content service + +type ContentClient interface { + // Info returns information about a committed object. + // + // This call can be used for getting the size of content and checking for + // existence. + Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) + // Update updates content metadata. + // + // This call can be used to manage the mutable content labels. The + // immutable metadata such as digest, size, and committed at cannot + // be updated. 
+ Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) + // List streams the entire set of content as Info objects and closes the + // stream. + // + // Typically, this will yield a large response, chunked into messages. + // Clients should make provisions to ensure they can handle the entire data + // set. + List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) + // Delete will delete the referenced object. + Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) + // Read allows one to read an object based on the offset into the content. + // + // The requested data may be returned in one or more messages. + Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) + // Status returns the status for a single reference. + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) + // ListStatuses returns the status of ongoing object ingestions, started via + // Write. + // + // Only those matching the regular expression will be provided in the + // response. If the provided regular expression is empty, all ingestions + // will be provided. + ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) + // Write begins or resumes writes to a resource identified by a unique ref. + // Only one active stream may exist at a time for each ref. + // + // Once a write stream has started, it may only write to a single ref, thus + // once a stream is started, the ref may be omitted on subsequent writes. + // + // For any write transaction represented by a ref, only a single write may + // be made to a given offset. If overlapping writes occur, it is an error. + // Writes should be sequential and implementations may throw an error if + // this is required. + // + // If expected_digest is set and already part of the content store, the + // write will fail. + // + // When completed, the commit flag should be set to true. If expected size + // or digest is set, the content will be validated against those values. + Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) + // Abort cancels the ongoing write named in the request. Any resources + // associated with the write will be collected. + Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) +} + +type contentClient struct { + cc *grpc.ClientConn +} + +func NewContentClient(cc *grpc.ClientConn) ContentClient { + return &contentClient{cc} +} + +func (c *contentClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) { + out := new(InfoResponse) + err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Info", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) { + out := new(UpdateResponse) + err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Update", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Content_serviceDesc.Streams[0], c.cc, "/containerd.services.content.v1.Content/List", opts...) + if err != nil { + return nil, err + } + x := &contentListClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Content_ListClient interface { + Recv() (*ListContentResponse, error) + grpc.ClientStream +} + +type contentListClient struct { + grpc.ClientStream +} + +func (x *contentListClient) Recv() (*ListContentResponse, error) { + m := new(ListContentResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *contentClient) Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) { + out := new(google_protobuf3.Empty) + err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Delete", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Content_serviceDesc.Streams[1], c.cc, "/containerd.services.content.v1.Content/Read", opts...) + if err != nil { + return nil, err + } + x := &contentReadClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Content_ReadClient interface { + Recv() (*ReadContentResponse, error) + grpc.ClientStream +} + +type contentReadClient struct { + grpc.ClientStream +} + +func (x *contentReadClient) Recv() (*ReadContentResponse, error) { + m := new(ReadContentResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *contentClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Status", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) { + out := new(ListStatusesResponse) + err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/ListStatuses", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Content_serviceDesc.Streams[2], c.cc, "/containerd.services.content.v1.Content/Write", opts...) 
+ if err != nil { + return nil, err + } + x := &contentWriteClient{stream} + return x, nil +} + +type Content_WriteClient interface { + Send(*WriteContentRequest) error + Recv() (*WriteContentResponse, error) + grpc.ClientStream +} + +type contentWriteClient struct { + grpc.ClientStream +} + +func (x *contentWriteClient) Send(m *WriteContentRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *contentWriteClient) Recv() (*WriteContentResponse, error) { + m := new(WriteContentResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *contentClient) Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) { + out := new(google_protobuf3.Empty) + err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Abort", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Content service + +type ContentServer interface { + // Info returns information about a committed object. + // + // This call can be used for getting the size of content and checking for + // existence. + Info(context.Context, *InfoRequest) (*InfoResponse, error) + // Update updates content metadata. + // + // This call can be used to manage the mutable content labels. The + // immutable metadata such as digest, size, and committed at cannot + // be updated. + Update(context.Context, *UpdateRequest) (*UpdateResponse, error) + // List streams the entire set of content as Info objects and closes the + // stream. + // + // Typically, this will yield a large response, chunked into messages. + // Clients should make provisions to ensure they can handle the entire data + // set. + List(*ListContentRequest, Content_ListServer) error + // Delete will delete the referenced object. + Delete(context.Context, *DeleteContentRequest) (*google_protobuf3.Empty, error) + // Read allows one to read an object based on the offset into the content. + // + // The requested data may be returned in one or more messages. + Read(*ReadContentRequest, Content_ReadServer) error + // Status returns the status for a single reference. + Status(context.Context, *StatusRequest) (*StatusResponse, error) + // ListStatuses returns the status of ongoing object ingestions, started via + // Write. + // + // Only those matching the regular expression will be provided in the + // response. If the provided regular expression is empty, all ingestions + // will be provided. + ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error) + // Write begins or resumes writes to a resource identified by a unique ref. + // Only one active stream may exist at a time for each ref. + // + // Once a write stream has started, it may only write to a single ref, thus + // once a stream is started, the ref may be omitted on subsequent writes. + // + // For any write transaction represented by a ref, only a single write may + // be made to a given offset. If overlapping writes occur, it is an error. + // Writes should be sequential and implementations may throw an error if + // this is required. + // + // If expected_digest is set and already part of the content store, the + // write will fail. + // + // When completed, the commit flag should be set to true. If expected size + // or digest is set, the content will be validated against those values. + Write(Content_WriteServer) error + // Abort cancels the ongoing write named in the request. 
Any resources + // associated with the write will be collected. + Abort(context.Context, *AbortRequest) (*google_protobuf3.Empty, error) +} + +func RegisterContentServer(s *grpc.Server, srv ContentServer) { + s.RegisterService(&_Content_serviceDesc, srv) +} + +func _Content_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Info(ctx, req.(*InfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Update(ctx, req.(*UpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_List_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListContentRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ContentServer).List(m, &contentListServer{stream}) +} + +type Content_ListServer interface { + Send(*ListContentResponse) error + grpc.ServerStream +} + +type contentListServer struct { + grpc.ServerStream +} + +func (x *contentListServer) Send(m *ListContentResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Content_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteContentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Delete(ctx, req.(*DeleteContentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_Read_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadContentRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ContentServer).Read(m, &contentReadServer{stream}) +} + +type Content_ReadServer interface { + Send(*ReadContentResponse) error + grpc.ServerStream +} + +type contentReadServer struct { + grpc.ServerStream +} + +func (x *contentReadServer) Send(m *ReadContentResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Content_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/containerd.services.content.v1.Content/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_ListStatuses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListStatusesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).ListStatuses(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/ListStatuses", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).ListStatuses(ctx, req.(*ListStatusesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_Write_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ContentServer).Write(&contentWriteServer{stream}) +} + +type Content_WriteServer interface { + Send(*WriteContentResponse) error + Recv() (*WriteContentRequest, error) + grpc.ServerStream +} + +type contentWriteServer struct { + grpc.ServerStream +} + +func (x *contentWriteServer) Send(m *WriteContentResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *contentWriteServer) Recv() (*WriteContentRequest, error) { + m := new(WriteContentRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Content_Abort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AbortRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Abort(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Abort", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Abort(ctx, req.(*AbortRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Content_serviceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.content.v1.Content", + HandlerType: (*ContentServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Info", + Handler: _Content_Info_Handler, + }, + { + MethodName: "Update", + Handler: _Content_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _Content_Delete_Handler, + }, + { + MethodName: "Status", + Handler: _Content_Status_Handler, + }, + { + MethodName: "ListStatuses", + Handler: _Content_ListStatuses_Handler, + }, + { + MethodName: "Abort", + Handler: _Content_Abort_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "List", + Handler: _Content_List_Handler, + ServerStreams: true, + }, + { + StreamName: "Read", + Handler: _Content_Read_Handler, + ServerStreams: true, + }, + { + StreamName: "Write", + Handler: _Content_Write_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "github.com/containerd/containerd/api/services/content/v1/content.proto", +} + +func (m *Info) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Digest) > 0 { + dAtA[i] = 0xa + i++ + i = 
encodeVarintContent(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) + } + if m.Size_ != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Size_)) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintContent(dAtA, i, uint64(types.SizeOfStdTime(m.CreatedAt))) + n1, err := types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x22 + i++ + i = encodeVarintContent(dAtA, i, uint64(types.SizeOfStdTime(m.UpdatedAt))) + n2, err := types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x2a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v))) + i = encodeVarintContent(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintContent(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *InfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Digest) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) + } + return i, nil +} + +func (m *InfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Info.Size())) + n3, err := m.Info.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *UpdateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Info.Size())) + n4, err := m.Info.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if m.UpdateMask != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintContent(dAtA, i, uint64(m.UpdateMask.Size())) + n5, err := m.UpdateMask.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *UpdateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Info.Size())) + n6, err := m.Info.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil +} + +func (m *ListContentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListContentRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var 
l int + _ = l + if len(m.Filters) > 0 { + for _, s := range m.Filters { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListContentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListContentResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Info) > 0 { + for _, msg := range m.Info { + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeleteContentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteContentRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Digest) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) + } + return i, nil +} + +func (m *ReadContentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadContentRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Digest) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) + } + if m.Offset != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Offset)) + } + if m.Size_ != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Size_)) + } + return i, nil +} + +func (m *ReadContentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadContentResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Offset != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Offset)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintContent(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func (m *Status) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Status) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(types.SizeOfStdTime(m.StartedAt))) + n7, err := types.StdTimeMarshalTo(m.StartedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x12 + i++ + i = encodeVarintContent(dAtA, i, uint64(types.SizeOfStdTime(m.UpdatedAt))) + n8, err := types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Ref) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) + i += copy(dAtA[i:], m.Ref) + } + if m.Offset != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Offset)) + } + if m.Total != 0 { + dAtA[i] = 
0x28 + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Total)) + } + if len(m.Expected) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintContent(dAtA, i, uint64(len(m.Expected))) + i += copy(dAtA[i:], m.Expected) + } + return i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ref) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) + i += copy(dAtA[i:], m.Ref) + } + return i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Status != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Status.Size())) + n9, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *ListStatusesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListStatusesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Filters) > 0 { + for _, s := range m.Filters { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListStatusesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListStatusesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Statuses) > 0 { + for _, msg := range m.Statuses { + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *WriteContentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteContentRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Action)) + } + if len(m.Ref) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) + i += copy(dAtA[i:], m.Ref) + } + if m.Total != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Total)) + } + if len(m.Expected) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintContent(dAtA, i, uint64(len(m.Expected))) + i += copy(dAtA[i:], m.Expected) + } + if m.Offset != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Offset)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintContent(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x3a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + 
sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v))) + i = encodeVarintContent(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintContent(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *WriteContentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteContentResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Action)) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintContent(dAtA, i, uint64(types.SizeOfStdTime(m.StartedAt))) + n10, err := types.StdTimeMarshalTo(m.StartedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintContent(dAtA, i, uint64(types.SizeOfStdTime(m.UpdatedAt))) + n11, err := types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + if m.Offset != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Offset)) + } + if m.Total != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintContent(dAtA, i, uint64(m.Total)) + } + if len(m.Digest) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) + } + return i, nil +} + +func (m *AbortRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AbortRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ref) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) + i += copy(dAtA[i:], m.Ref) + } + return i, nil +} + +func encodeVarintContent(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Info) Size() (n int) { + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.Size_ != 0 { + n += 1 + sovContent(uint64(m.Size_)) + } + l = types.SizeOfStdTime(m.CreatedAt) + n += 1 + l + sovContent(uint64(l)) + l = types.SizeOfStdTime(m.UpdatedAt) + n += 1 + l + sovContent(uint64(l)) + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v))) + n += mapEntrySize + 1 + sovContent(uint64(mapEntrySize)) + } + } + return n +} + +func (m *InfoRequest) Size() (n int) { + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + return n +} + +func (m *InfoResponse) Size() (n int) { + var l int + _ = l + l = m.Info.Size() + n += 1 + l + sovContent(uint64(l)) + return n +} + +func (m *UpdateRequest) Size() (n int) { + var l int + _ = l + l = m.Info.Size() + n += 1 + l + sovContent(uint64(l)) + if m.UpdateMask != nil { + l = m.UpdateMask.Size() + n += 1 + l + sovContent(uint64(l)) + } + return n +} + +func (m *UpdateResponse) Size() (n int) { + var l int + _ = l + l = m.Info.Size() + n += 1 + l + sovContent(uint64(l)) + return n +} + +func (m *ListContentRequest) Size() (n int) { + var l int + _ = l + if len(m.Filters) > 0 { + for 
_, s := range m.Filters { + l = len(s) + n += 1 + l + sovContent(uint64(l)) + } + } + return n +} + +func (m *ListContentResponse) Size() (n int) { + var l int + _ = l + if len(m.Info) > 0 { + for _, e := range m.Info { + l = e.Size() + n += 1 + l + sovContent(uint64(l)) + } + } + return n +} + +func (m *DeleteContentRequest) Size() (n int) { + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + return n +} + +func (m *ReadContentRequest) Size() (n int) { + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.Offset != 0 { + n += 1 + sovContent(uint64(m.Offset)) + } + if m.Size_ != 0 { + n += 1 + sovContent(uint64(m.Size_)) + } + return n +} + +func (m *ReadContentResponse) Size() (n int) { + var l int + _ = l + if m.Offset != 0 { + n += 1 + sovContent(uint64(m.Offset)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + return n +} + +func (m *Status) Size() (n int) { + var l int + _ = l + l = types.SizeOfStdTime(m.StartedAt) + n += 1 + l + sovContent(uint64(l)) + l = types.SizeOfStdTime(m.UpdatedAt) + n += 1 + l + sovContent(uint64(l)) + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.Offset != 0 { + n += 1 + sovContent(uint64(m.Offset)) + } + if m.Total != 0 { + n += 1 + sovContent(uint64(m.Total)) + } + l = len(m.Expected) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + return n +} + +func (m *StatusRequest) Size() (n int) { + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + return n +} + +func (m *StatusResponse) Size() (n int) { + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovContent(uint64(l)) + } + return n +} + +func (m *ListStatusesRequest) Size() (n int) { + var l int + _ = l + if len(m.Filters) > 0 { + for _, s := range m.Filters { + l = len(s) + n += 1 + l + sovContent(uint64(l)) + } + } + return n +} + +func (m *ListStatusesResponse) Size() (n int) { + var l int + _ = l + if len(m.Statuses) > 0 { + for _, e := range m.Statuses { + l = e.Size() + n += 1 + l + sovContent(uint64(l)) + } + } + return n +} + +func (m *WriteContentRequest) Size() (n int) { + var l int + _ = l + if m.Action != 0 { + n += 1 + sovContent(uint64(m.Action)) + } + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.Total != 0 { + n += 1 + sovContent(uint64(m.Total)) + } + l = len(m.Expected) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.Offset != 0 { + n += 1 + sovContent(uint64(m.Offset)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v))) + n += mapEntrySize + 1 + sovContent(uint64(mapEntrySize)) + } + } + return n +} + +func (m *WriteContentResponse) Size() (n int) { + var l int + _ = l + if m.Action != 0 { + n += 1 + sovContent(uint64(m.Action)) + } + l = types.SizeOfStdTime(m.StartedAt) + n += 1 + l + sovContent(uint64(l)) + l = types.SizeOfStdTime(m.UpdatedAt) + n += 1 + l + sovContent(uint64(l)) + if m.Offset != 0 { + n += 1 + sovContent(uint64(m.Offset)) + } + if m.Total != 0 { + n += 1 + sovContent(uint64(m.Total)) + } + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + return n +} + +func (m *AbortRequest) Size() (n int) { + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + 
return n +} + +func sovContent(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozContent(x uint64) (n int) { + return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Info) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&Info{`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, + `CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`, + `UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`, + `Labels:` + mapStringForLabels + `,`, + `}`, + }, "") + return s +} +func (this *InfoRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InfoRequest{`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `}`, + }, "") + return s +} +func (this *InfoResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InfoResponse{`, + `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateRequest{`, + `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, + `UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "google_protobuf1.FieldMask", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateResponse{`, + `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListContentRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListContentRequest{`, + `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, + `}`, + }, "") + return s +} +func (this *ListContentResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListContentResponse{`, + `Info:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Info), "Info", "Info", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteContentRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteContentRequest{`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `}`, + }, "") + return s +} +func (this *ReadContentRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReadContentRequest{`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, + `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, + `}`, + }, "") + return s +} +func (this *ReadContentResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReadContentResponse{`, + `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, + `Data:` + 
fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *Status) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Status{`, + `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`, + `UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`, + `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, + `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, + `Total:` + fmt.Sprintf("%v", this.Total) + `,`, + `Expected:` + fmt.Sprintf("%v", this.Expected) + `,`, + `}`, + }, "") + return s +} +func (this *StatusRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatusRequest{`, + `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, + `}`, + }, "") + return s +} +func (this *StatusResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatusResponse{`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "Status", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListStatusesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListStatusesRequest{`, + `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, + `}`, + }, "") + return s +} +func (this *ListStatusesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListStatusesResponse{`, + `Statuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Statuses), "Status", "Status", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *WriteContentRequest) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&WriteContentRequest{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, + `Total:` + fmt.Sprintf("%v", this.Total) + `,`, + `Expected:` + fmt.Sprintf("%v", this.Expected) + `,`, + `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Labels:` + mapStringForLabels + `,`, + `}`, + }, "") + return s +} +func (this *WriteContentResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WriteContentResponse{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`, + `UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`, + `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, + `Total:` + fmt.Sprintf("%v", this.Total) + `,`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `}`, + }, "") + return s +} +func (this *AbortRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AbortRequest{`, + `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, + `}`, + }, "") + return s +} +func valueToStringContent(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := 
reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Info) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Info: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthContent + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthContent + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdateMask == nil { + m.UpdateMask = &google_protobuf1.FieldMask{} + } + if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListContentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListContentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListContentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListContentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = append(m.Info, Info{}) + if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteContentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteContentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = 
github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadContentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadContentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadContentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadContentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Status) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Status: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expected", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expected = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &Status{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListStatusesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListStatusesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListStatusesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListStatusesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListStatusesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListStatusesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Statuses = append(m.Statuses, Status{}) + if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WriteContentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteContentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (WriteAction(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expected", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expected = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthContent + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthContent + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WriteContentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteContentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (WriteAction(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AbortRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AbortRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AbortRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipContent(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthContent + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipContent(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowContent = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/services/content/v1/content.proto", fileDescriptorContent) +} + +var fileDescriptorContent = []byte{ + // 1081 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0x78, 0xed, 0x4d, 0xf2, 0x9c, 0x16, 0x33, 0x31, 0x95, 0xb5, 0x08, 0x67, 0xbb, 0x42, + 0xc8, 0x6a, 0xc9, 0x3a, 0x75, 0x7a, 0x00, 0x2a, 0x01, 0x8e, 0x9b, 0xaa, 0x41, 0x4d, 0x41, 0x5b, + 0x97, 0x40, 0x2f, 0x65, 0x6d, 0x8f, 0xcd, 0x2a, 0xb6, 0xd7, 0xdd, 0x19, 0x5b, 0x84, 0x13, 0x17, + 0x24, 0x14, 0xf5, 0x80, 0xb8, 0xe7, 0x02, 0xfc, 0x15, 0x1c, 0x38, 0xe7, 0xc8, 0x11, 0x71, 0x68, + 0x69, 0xfe, 0x07, 0xee, 0x68, 0x66, 0x67, 0xed, 0xf5, 0x47, 0x58, 0xdb, 0x31, 0x27, 0xbf, 0x99, + 0x7d, 0xbf, 0xf7, 0xfd, 0x31, 0x86, 0x7b, 0x4d, 0x87, 0x7d, 0xdd, 0xab, 0x9a, 0x35, 0xb7, 0x5d, + 0xa8, 0xb9, 0x1d, 0x66, 0x3b, 0x1d, 0xe2, 0xd5, 0xc3, 0xa4, 0xdd, 0x75, 0x0a, 0x94, 0x78, 0x7d, + 0xa7, 0x46, 0xa8, 0xb8, 0x27, 0x1d, 0x56, 0xe8, 0xdf, 0x0a, 0x48, 0xb3, 0xeb, 0xb9, 0xcc, 0xc5, + 0xb9, 0x21, 0xc2, 0x0c, 0xb8, 0xcd, 0x80, 0xa5, 0x7f, 0x4b, 0xcb, 0x34, 0xdd, 0xa6, 0x2b, 0x58, + 0x0b, 0x9c, 0xf2, 0x51, 0x9a, 0xde, 0x74, 0xdd, 0x66, 0x8b, 0x14, 0xc4, 0xa9, 0xda, 0x6b, 0x14, + 0x1a, 0x0e, 0x69, 0xd5, 0x9f, 0xb6, 0x6d, 0x7a, 0x24, 0x39, 0x36, 0xc7, 0x39, 0x98, 0xd3, 0x26, + 0x94, 0xd9, 0xed, 0xae, 0x64, 0x78, 0x73, 0x9c, 0x81, 0xb4, 0xbb, 0xec, 0xd8, 0xff, 0x68, 0xfc, + 0x13, 0x87, 0xc4, 0x7e, 0xa7, 0xe1, 0xe2, 0x4f, 0x40, 0xad, 0x3b, 0x4d, 0x42, 0x59, 0x16, 0xe9, + 0x28, 0xbf, 0xb6, 0x5b, 0x3c, 0x7b, 0xb1, 0x19, 0xfb, 0xeb, 0xc5, 0xe6, 0x8d, 0x90, 0xfb, 0x6e, + 0x97, 0x74, 0x06, 0x5e, 0xd0, 0x42, 0xd3, 0xdd, 0xf2, 0x21, 0xe6, 0x5d, 0xf1, 0x63, 0x49, 0x09, + 0x18, 0x43, 0x82, 0x3a, 0xdf, 0x92, 0x6c, 0x5c, 0x47, 0x79, 0xc5, 0x12, 0x34, 0x2e, 0x03, 
0xd4, + 0x3c, 0x62, 0x33, 0x52, 0x7f, 0x6a, 0xb3, 0xac, 0xa2, 0xa3, 0x7c, 0xaa, 0xa8, 0x99, 0xbe, 0x69, + 0x66, 0x60, 0x9a, 0x59, 0x09, 0x6c, 0xdf, 0x5d, 0xe5, 0xfa, 0x7f, 0x7c, 0xb9, 0x89, 0xac, 0x35, + 0x89, 0x2b, 0x31, 0x2e, 0xa4, 0xd7, 0xad, 0x07, 0x42, 0x12, 0xf3, 0x08, 0x91, 0xb8, 0x12, 0xc3, + 0xf7, 0x41, 0x6d, 0xd9, 0x55, 0xd2, 0xa2, 0xd9, 0xa4, 0xae, 0xe4, 0x53, 0xc5, 0x6d, 0xf3, 0xbf, + 0x33, 0x63, 0xf2, 0xf8, 0x98, 0x0f, 0x04, 0x64, 0xaf, 0xc3, 0xbc, 0x63, 0x4b, 0xe2, 0xb5, 0xf7, + 0x21, 0x15, 0xba, 0xc6, 0x69, 0x50, 0x8e, 0xc8, 0xb1, 0x1f, 0x3f, 0x8b, 0x93, 0x38, 0x03, 0xc9, + 0xbe, 0xdd, 0xea, 0xf9, 0x91, 0x58, 0xb3, 0xfc, 0xc3, 0x07, 0xf1, 0xf7, 0x90, 0xf1, 0x25, 0xa4, + 0xb8, 0x58, 0x8b, 0x3c, 0xeb, 0xf1, 0x88, 0x2d, 0x31, 0xfa, 0xc6, 0x43, 0x58, 0xf7, 0x45, 0xd3, + 0xae, 0xdb, 0xa1, 0x04, 0x7f, 0x08, 0x09, 0xa7, 0xd3, 0x70, 0x85, 0xe4, 0x54, 0xf1, 0xed, 0x59, + 0xbc, 0xdd, 0x4d, 0x70, 0xfd, 0x96, 0xc0, 0x19, 0xcf, 0x11, 0x5c, 0x79, 0x2c, 0xa2, 0x17, 0x58, + 0x7b, 0x49, 0x89, 0xf8, 0x0e, 0xa4, 0xfc, 0x74, 0x88, 0x3a, 0x16, 0xc1, 0x99, 0x96, 0xc7, 0x7b, + 0xbc, 0xd4, 0x0f, 0x6c, 0x7a, 0x64, 0xc9, 0xac, 0x73, 0xda, 0xf8, 0x0c, 0xae, 0x06, 0xd6, 0x2c, + 0xc9, 0x41, 0x13, 0xf0, 0x03, 0x87, 0xb2, 0xb2, 0xcf, 0x12, 0x38, 0x99, 0x85, 0x95, 0x86, 0xd3, + 0x62, 0xc4, 0xa3, 0x59, 0xa4, 0x2b, 0xf9, 0x35, 0x2b, 0x38, 0x1a, 0x8f, 0x61, 0x63, 0x84, 0x7f, + 0xc2, 0x0c, 0x65, 0x21, 0x33, 0xaa, 0x90, 0xb9, 0x4b, 0x5a, 0x84, 0x91, 0x31, 0x43, 0x96, 0x59, + 0x1b, 0xcf, 0x11, 0x60, 0x8b, 0xd8, 0xf5, 0xff, 0x4f, 0x05, 0xbe, 0x06, 0xaa, 0xdb, 0x68, 0x50, + 0xc2, 0x64, 0xfb, 0xcb, 0xd3, 0x60, 0x28, 0x28, 0xc3, 0xa1, 0x60, 0x94, 0x60, 0x63, 0xc4, 0x1a, + 0x19, 0xc9, 0xa1, 0x08, 0x34, 0x2e, 0xa2, 0x6e, 0x33, 0x5b, 0x08, 0x5e, 0xb7, 0x04, 0x6d, 0xfc, + 0x1c, 0x07, 0xf5, 0x11, 0xb3, 0x59, 0x8f, 0xf2, 0xe9, 0x40, 0x99, 0xed, 0xc9, 0xe9, 0x80, 0xe6, + 0x99, 0x0e, 0x12, 0x37, 0x31, 0x62, 0xe2, 0x8b, 0x8d, 0x98, 0x34, 0x28, 0x1e, 0x69, 0x08, 0x57, + 0xd7, 0x2c, 0x4e, 0x86, 0x5c, 0x4a, 0x8c, 0xb8, 0x94, 0x81, 0x24, 0x73, 0x99, 0xdd, 0xca, 0x26, + 0xc5, 0xb5, 0x7f, 0xc0, 0x0f, 0x61, 0x95, 0x7c, 0xd3, 0x25, 0x35, 0x46, 0xea, 0x59, 0x75, 0xe1, + 0x8c, 0x0c, 0x64, 0x18, 0xd7, 0xe1, 0x8a, 0x1f, 0xa3, 0x20, 0xe1, 0xd2, 0x40, 0x34, 0x30, 0x90, + 0xb7, 0x55, 0xc0, 0x32, 0xa8, 0x67, 0x95, 0x8a, 0x1b, 0x19, 0xca, 0x77, 0xa2, 0x2a, 0x5a, 0xe2, + 0x25, 0xca, 0x28, 0xf8, 0x6d, 0xe2, 0xdf, 0x12, 0x1a, 0xdd, 0x57, 0x5f, 0x41, 0x66, 0x14, 0x20, + 0x0d, 0xb9, 0x0f, 0xab, 0x54, 0xde, 0xc9, 0xe6, 0x9a, 0xd1, 0x14, 0xd9, 0x5e, 0x03, 0xb4, 0xf1, + 0x93, 0x02, 0x1b, 0x87, 0x9e, 0x33, 0xd1, 0x62, 0x65, 0x50, 0xed, 0x1a, 0x73, 0xdc, 0x8e, 0x70, + 0xf5, 0x6a, 0xf1, 0x66, 0x94, 0x7c, 0x21, 0xa4, 0x24, 0x20, 0x96, 0x84, 0x06, 0x31, 0x8d, 0x0f, + 0x93, 0x3e, 0x48, 0xae, 0x72, 0x51, 0x72, 0x13, 0x97, 0x4f, 0x6e, 0xa8, 0xb4, 0x92, 0x53, 0xbb, + 0x45, 0x1d, 0x76, 0x0b, 0x3e, 0x1c, 0xec, 0xbe, 0x15, 0x11, 0xc8, 0x8f, 0x66, 0x72, 0x74, 0x34, + 0x5a, 0xcb, 0x5e, 0x85, 0x2f, 0xe3, 0x90, 0x19, 0x55, 0x23, 0xf3, 0xbe, 0x94, 0xac, 0x8c, 0x0e, + 0x85, 0xf8, 0x32, 0x86, 0x82, 0xb2, 0xd8, 0x50, 0x98, 0x6f, 0x04, 0x0c, 0x47, 0xb2, 0x7a, 0xe9, + 0xa9, 0xaf, 0xc3, 0x7a, 0xa9, 0xea, 0x7a, 0xec, 0xc2, 0xee, 0xbf, 0xf1, 0x3d, 0x82, 0x54, 0x28, + 0x7a, 0xf8, 0x2d, 0x48, 0x3c, 0xaa, 0x94, 0x2a, 0xe9, 0x98, 0xb6, 0x71, 0x72, 0xaa, 0xbf, 0x16, + 0xfa, 0xc4, 0x3b, 0x0b, 0x6f, 0x42, 0xf2, 0xd0, 0xda, 0xaf, 0xec, 0xa5, 0x91, 0x96, 0x39, 0x39, + 0xd5, 0xd3, 0xa1, 0xef, 0x82, 0xc4, 0xd7, 0x41, 0x2d, 0x7f, 0x7a, 0x70, 0xb0, 0x5f, 0x49, 0xc7, + 0xb5, 0x37, 0x4e, 
0x4e, 0xf5, 0xd7, 0x43, 0x1c, 0x65, 0xb7, 0xdd, 0x76, 0x98, 0xb6, 0xf1, 0xc3, + 0x2f, 0xb9, 0xd8, 0x6f, 0xbf, 0xe6, 0xc2, 0x7a, 0x8b, 0xbf, 0xaf, 0xc0, 0x8a, 0x2c, 0x03, 0x6c, + 0xcb, 0x97, 0xe9, 0xcd, 0x59, 0x36, 0xa9, 0x74, 0x4d, 0x7b, 0x77, 0x36, 0x66, 0x59, 0x61, 0x4d, + 0x50, 0xfd, 0xb7, 0x04, 0xde, 0x8a, 0xc2, 0x8d, 0xbc, 0x80, 0x34, 0x73, 0x56, 0x76, 0xa9, 0xe8, + 0x19, 0x24, 0xf8, 0x68, 0xc3, 0xc5, 0x28, 0xdc, 0xe4, 0x43, 0x44, 0xdb, 0x99, 0x0b, 0xe3, 0x2b, + 0xdc, 0x46, 0xf8, 0x73, 0x50, 0xfd, 0xe7, 0x04, 0xbe, 0x1d, 0x25, 0x60, 0xda, 0xb3, 0x43, 0xbb, + 0x36, 0x51, 0xdf, 0x7b, 0xfc, 0x7f, 0x03, 0x77, 0x85, 0xef, 0xec, 0x68, 0x57, 0x26, 0xdf, 0x19, + 0xd1, 0xae, 0x4c, 0x79, 0x0d, 0x6c, 0x23, 0x9e, 0x26, 0xb9, 0xe2, 0xb7, 0x66, 0xdc, 0x41, 0xb3, + 0xa6, 0x69, 0x6c, 0xe5, 0x1d, 0xc3, 0x7a, 0x78, 0x03, 0xe1, 0x99, 0x42, 0x3f, 0xb6, 0xe0, 0xb4, + 0xdb, 0xf3, 0x81, 0xa4, 0xea, 0x3e, 0x24, 0xfd, 0xd6, 0xd9, 0x59, 0x60, 0x24, 0x47, 0xeb, 0x9c, + 0x36, 0x60, 0xf3, 0x68, 0x1b, 0xe1, 0x03, 0x48, 0x8a, 0xd9, 0x80, 0x23, 0x3b, 0x27, 0x3c, 0x42, + 0x2e, 0xaa, 0x8e, 0xdd, 0x27, 0x67, 0xaf, 0x72, 0xb1, 0x3f, 0x5f, 0xe5, 0x62, 0xdf, 0x9d, 0xe7, + 0xd0, 0xd9, 0x79, 0x0e, 0xfd, 0x71, 0x9e, 0x43, 0x7f, 0x9f, 0xe7, 0xd0, 0x93, 0x8f, 0x17, 0xfd, + 0x1f, 0x7d, 0x47, 0x92, 0x5f, 0xc4, 0xaa, 0xaa, 0xd0, 0xb6, 0xf3, 0x6f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xc0, 0xc2, 0x35, 0xb1, 0x94, 0x0f, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto new file mode 100644 index 000000000..086b3e39b --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto @@ -0,0 +1,318 @@ +syntax = "proto3"; + +package containerd.services.content.v1; + +import weak "gogoproto/gogo.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/empty.proto"; + +option go_package = "github.com/containerd/containerd/api/services/content/v1;content"; + +// Content provides access to a content addressable storage system. +service Content { + // Info returns information about a committed object. + // + // This call can be used for getting the size of content and checking for + // existence. + rpc Info(InfoRequest) returns (InfoResponse); + + // Update updates content metadata. + // + // This call can be used to manage the mutable content labels. The + // immutable metadata such as digest, size, and committed at cannot + // be updated. + rpc Update(UpdateRequest) returns (UpdateResponse); + + // List streams the entire set of content as Info objects and closes the + // stream. + // + // Typically, this will yield a large response, chunked into messages. + // Clients should make provisions to ensure they can handle the entire data + // set. + rpc List(ListContentRequest) returns (stream ListContentResponse); + + // Delete will delete the referenced object. + rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty); + + // Read allows one to read an object based on the offset into the content. + // + // The requested data may be returned in one or more messages. + rpc Read(ReadContentRequest) returns (stream ReadContentResponse); + + // Status returns the status for a single reference. + rpc Status(StatusRequest) returns (StatusResponse); + + // ListStatuses returns the status of ongoing object ingestions, started via + // Write. 
+ // + // Only those matching the regular expression will be provided in the + // response. If the provided regular expression is empty, all ingestions + // will be provided. + rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse); + + // Write begins or resumes writes to a resource identified by a unique ref. + // Only one active stream may exist at a time for each ref. + // + // Once a write stream has started, it may only write to a single ref, thus + // once a stream is started, the ref may be omitted on subsequent writes. + // + // For any write transaction represented by a ref, only a single write may + // be made to a given offset. If overlapping writes occur, it is an error. + // Writes should be sequential and implementations may throw an error if + // this is required. + // + // If expected_digest is set and already part of the content store, the + // write will fail. + // + // When completed, the commit flag should be set to true. If expected size + // or digest is set, the content will be validated against those values. + rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse); + + // Abort cancels the ongoing write named in the request. Any resources + // associated with the write will be collected. + rpc Abort(AbortRequest) returns (google.protobuf.Empty); +} + +message Info { + // Digest is the hash identity of the blob. + string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + + // Size is the total number of bytes in the blob. + int64 size = 2; + + // CreatedAt provides the time at which the blob was committed. + google.protobuf.Timestamp created_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + + // UpdatedAt provides the time the info was last updated. + google.protobuf.Timestamp updated_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + + // Labels are arbitrary data on snapshots. + // + // The combined size of a key/value pair cannot exceed 4096 bytes. + map labels = 5; +} + +message InfoRequest { + string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; +} + +message InfoResponse { + Info info = 1 [(gogoproto.nullable) = false]; +} + +message UpdateRequest { + Info info = 1 [(gogoproto.nullable) = false]; + + // UpdateMask specifies which fields to perform the update on. If empty, + // the operation applies to all fields. + // + // In info, Digest, Size, and CreatedAt are immutable, + // other field may be updated using this mask. + // If no mask is provided, all mutable field are updated. + google.protobuf.FieldMask update_mask = 2; +} + +message UpdateResponse { + Info info = 1 [(gogoproto.nullable) = false]; +} + +message ListContentRequest { + // Filters contains one or more filters using the syntax defined in the + // containerd filter package. + // + // The returned result will be those that match any of the provided + // filters. Expanded, containers that match the following will be + // returned: + // + // filters[0] or filters[1] or ... or filters[n-1] or filters[n] + // + // If filters is zero-length or nil, all items will be returned. + repeated string filters = 1; +} + +message ListContentResponse { + repeated Info info = 1 [(gogoproto.nullable) = false]; +} + +message DeleteContentRequest { + // Digest specifies which content to delete. 
+ string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; +} + +// ReadContentRequest defines the fields that make up a request to read a portion of +// data from a stored object. +message ReadContentRequest { + // Digest is the hash identity to read. + string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + + // Offset specifies the number of bytes from the start at which to begin + // the read. If zero or less, the read will be from the start. This uses + // standard zero-indexed semantics. + int64 offset = 2; + + // size is the total size of the read. If zero, the entire blob will be + // returned by the service. + int64 size = 3; +} + +// ReadContentResponse carries byte data for a read request. +message ReadContentResponse { + int64 offset = 1; // offset of the returned data + bytes data = 2; // actual data +} + +message Status { + google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + string ref = 3; + int64 offset = 4; + int64 total = 5; + string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; +} + + +message StatusRequest { + string ref = 1; +} + +message StatusResponse { + Status status = 1; +} + +message ListStatusesRequest { + repeated string filters = 1; +} + +message ListStatusesResponse { + repeated Status statuses = 1 [(gogoproto.nullable) = false]; +} + +// WriteAction defines the behavior of a WriteRequest. +enum WriteAction { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "WriteAction"; + + // WriteActionStat instructs the writer to return the current status while + // holding the lock on the write. + STAT = 0 [(gogoproto.enumvalue_customname) = "WriteActionStat"]; + + // WriteActionWrite sets the action for the write request to write data. + // + // Any data included will be written at the provided offset. The + // transaction will be left open for further writes. + // + // This is the default. + WRITE = 1 [(gogoproto.enumvalue_customname) = "WriteActionWrite"]; + + // WriteActionCommit will write any outstanding data in the message and + // commit the write, storing it under the digest. + // + // This can be used in a single message to send the data, verify it and + // commit it. + // + // This action will always terminate the write. + COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"]; +} + +// WriteContentRequest writes data to the request ref at offset. +message WriteContentRequest { + // Action sets the behavior of the write. + // + // When this is a write and the ref is not yet allocated, the ref will be + // allocated and the data will be written at offset. + // + // If the action is write and the ref is allocated, it will accept data to + // an offset that has not yet been written. + // + // If the action is write and there is no data, the current write status + // will be returned. This works differently from status because the stream + // holds a lock. + WriteAction action = 1; + + // Ref identifies the pre-commit object to write to. + string ref = 2; + + // Total can be set to have the service validate the total size of the + // committed content. + // + // The latest value before or with the commit action message will be use to + // validate the content. 
If the offset overflows total, the service may + // report an error. It is only required on one message for the write. + // + // If the value is zero or less, no validation of the final content will be + // performed. + int64 total = 3; + + // Expected can be set to have the service validate the final content against + // the provided digest. + // + // If the digest is already present in the object store, an AlreadyExists + // error will be returned. + // + // Only the latest version will be used to check the content against the + // digest. It is only required to include it on a single message, before or + // with the commit action message. + string expected = 4 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + + // Offset specifies the number of bytes from the start at which to begin + // the write. For most implementations, this means from the start of the + // file. This uses standard, zero-indexed semantics. + // + // If the action is write, the remote may remove all previously written + // data after the offset. Implementations may support arbitrary offsets but + // MUST support reseting this value to zero with a write. If an + // implementation does not support a write at a particular offset, an + // OutOfRange error must be returned. + int64 offset = 5; + + // Data is the actual bytes to be written. + // + // If this is empty and the message is not a commit, a response will be + // returned with the current write state. + bytes data = 6; + + // Labels are arbitrary data on snapshots. + // + // The combined size of a key/value pair cannot exceed 4096 bytes. + map labels = 7; +} + +// WriteContentResponse is returned on the culmination of a write call. +message WriteContentResponse { + // Action contains the action for the final message of the stream. A writer + // should confirm that they match the intended result. + WriteAction action = 1; + + // StartedAt provides the time at which the write began. + // + // This must be set for stat and commit write actions. All other write + // actions may omit this. + google.protobuf.Timestamp started_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + + // UpdatedAt provides the last time of a successful write. + // + // This must be set for stat and commit write actions. All other write + // actions may omit this. + google.protobuf.Timestamp updated_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + + // Offset is the current committed size for the write. + int64 offset = 4; + + // Total provides the current, expected total size of the write. + // + // We include this to provide consistency with the Status structure on the + // client writer. + // + // This is only valid on the Stat and Commit response. + int64 total = 5; + + // Digest, if present, includes the digest up to the currently committed + // bytes. If action is commit, this field will be set. It is implementation + // defined if this is set for other actions. + string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; +} + +message AbortRequest { + string ref = 1; +} diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go new file mode 100644 index 000000000..93e88c0dc --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go @@ -0,0 +1,410 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
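For orientation, here is a minimal Go sketch of how a client might drive the streaming Write RPC defined in content.proto above. It assumes the gRPC client generated for this service (NewContentClient in this vendored package), an already-established *grpc.ClientConn, and a hypothetical helper name writeBlob; error handling is abbreviated and this is illustrative only, not part of the vendored sources.

// writeBlob commits a small blob in a single WriteActionCommit message,
// letting the service validate the final content against Total and Expected
// as described in the WriteContentRequest comments above.
// Assumptions: conn is an open *grpc.ClientConn to containerd's content
// service; writeBlob is a hypothetical helper name.
package main

import (
	"context"
	"log"

	content "github.com/containerd/containerd/api/services/content/v1"
	digest "github.com/opencontainers/go-digest"
	"google.golang.org/grpc"
)

func writeBlob(ctx context.Context, conn *grpc.ClientConn, ref string, data []byte) error {
	client := content.NewContentClient(conn)

	// Open the write stream; only one active stream may exist per ref.
	wr, err := client.Write(ctx)
	if err != nil {
		return err
	}

	// Send the data and commit it in one message. Expected and Total let the
	// service verify the committed content (see WriteActionCommit above).
	if err := wr.Send(&content.WriteContentRequest{
		Action:   content.WriteActionCommit,
		Ref:      ref,
		Offset:   0,
		Total:    int64(len(data)),
		Expected: digest.FromBytes(data),
		Data:     data,
	}); err != nil {
		return err
	}

	// The response carries the final offset and digest of the committed write.
	resp, err := wr.Recv()
	if err != nil {
		return err
	}
	log.Printf("committed %s at offset %d", resp.Digest, resp.Offset)
	return wr.CloseSend()
}

In practice a caller would split larger blobs across several WriteActionWrite messages at increasing offsets and finish with a single commit, which is exactly the sequential-write contract the proto comments above describe.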
+// source: github.com/containerd/containerd/api/types/descriptor.proto + +/* + Package types is a generated protocol buffer package. + + It is generated from these files: + github.com/containerd/containerd/api/types/descriptor.proto + github.com/containerd/containerd/api/types/metrics.proto + github.com/containerd/containerd/api/types/mount.proto + github.com/containerd/containerd/api/types/platform.proto + + It has these top-level messages: + Descriptor + Metric + Mount + Platform +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" + +import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// Descriptor describes a blob in a content store. +// +// This descriptor can be used to reference content from an +// oci descriptor found in a manifest. +// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor +type Descriptor struct { + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` +} + +func (m *Descriptor) Reset() { *m = Descriptor{} } +func (*Descriptor) ProtoMessage() {} +func (*Descriptor) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} } + +func init() { + proto.RegisterType((*Descriptor)(nil), "containerd.types.Descriptor") +} +func (m *Descriptor) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MediaType) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDescriptor(dAtA, i, uint64(len(m.MediaType))) + i += copy(dAtA[i:], m.MediaType) + } + if len(m.Digest) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintDescriptor(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) + } + if m.Size_ != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintDescriptor(dAtA, i, uint64(m.Size_)) + } + return i, nil +} + +func encodeVarintDescriptor(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Descriptor) Size() (n int) { + var l int + _ = l + l = len(m.MediaType) + if l > 0 { + n += 1 + l + sovDescriptor(uint64(l)) + } + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovDescriptor(uint64(l)) + } + if m.Size_ != 0 { + n += 1 + sovDescriptor(uint64(m.Size_)) + } + return n +} + +func sovDescriptor(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozDescriptor(x 
uint64) (n int) { + return sovDescriptor(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Descriptor) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Descriptor{`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, + `}`, + }, "") + return s +} +func valueToStringDescriptor(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Descriptor) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDescriptor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Descriptor: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Descriptor: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDescriptor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDescriptor + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDescriptor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDescriptor + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDescriptor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDescriptor(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDescriptor + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDescriptor(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDescriptor + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDescriptor + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDescriptor + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthDescriptor + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDescriptor + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipDescriptor(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthDescriptor = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDescriptor = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor.proto", fileDescriptorDescriptor) +} + +var fileDescriptorDescriptor = []byte{ + // 234 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16, + 0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, + 0x94, 0xe9, 0x81, 0x95, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, + 0x3a, 0xa5, 0x6e, 0x46, 0x2e, 0x2e, 0x17, 0xb8, 0x66, 0x21, 0x59, 0x2e, 0xae, 0xdc, 0xd4, 0x94, + 0xcc, 0xc4, 0x78, 0x90, 0x1e, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x4e, 0xb0, 0x48, 0x48, + 0x65, 0x41, 0xaa, 0x90, 0x17, 0x17, 0x5b, 0x4a, 0x66, 0x7a, 0x6a, 0x71, 0x89, 0x04, 0x13, 0x48, + 0xca, 0xc9, 0xe8, 0xc4, 0x3d, 0x79, 0x86, 0x5b, 0xf7, 0xe4, 0xb5, 0x90, 0x9c, 0x9a, 0x5f, 0x90, + 0x9a, 0x07, 0xb7, 0xbc, 0x58, 0x3f, 0x3d, 0x5f, 0x17, 0xa2, 0x45, 0xcf, 0x05, 0x4c, 0x05, 0x41, + 0x4d, 0x10, 0x12, 0xe2, 0x62, 0x29, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x56, 0x60, 0xd4, 0x60, 0x0e, + 0x02, 0xb3, 0x9d, 0xbc, 0x4e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c, + 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x18, 0x65, 0x40, + 0x7c, 0x60, 0x58, 0x83, 0xc9, 0x08, 0x86, 0x24, 0x36, 0xb0, 0x17, 0x8d, 0x01, 0x01, 0x00, 0x00, + 0xff, 0xff, 0xea, 0xac, 0x78, 0x9a, 0x49, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.proto b/vendor/github.com/containerd/containerd/api/types/descriptor.proto new file mode 100644 index 
000000000..5c00dca4f --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/descriptor.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package containerd.types; + +import weak "gogoproto/gogo.proto"; + +option go_package = "github.com/containerd/containerd/api/types;types"; + +// Descriptor describes a blob in a content store. +// +// This descriptor can be used to reference content from an +// oci descriptor found in a manifest. +// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor +message Descriptor { + string media_type = 1; + string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + int64 size = 3; +} diff --git a/vendor/github.com/containerd/containerd/api/types/doc.go b/vendor/github.com/containerd/containerd/api/types/doc.go new file mode 100644 index 000000000..475b465ed --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go new file mode 100644 index 000000000..52e9f40a5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go @@ -0,0 +1,412 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/api/types/metrics.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +import google_protobuf1 "github.com/gogo/protobuf/types" +import _ "github.com/gogo/protobuf/types" + +import time "time" + +import types1 "github.com/gogo/protobuf/types" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +type Metric struct { + Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,stdtime" json:"timestamp"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Data *google_protobuf1.Any `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{0} } + +func init() { + proto.RegisterType((*Metric)(nil), "containerd.types.Metric") +} +func (m *Metric) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metric) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMetrics(dAtA, i, uint64(types1.SizeOfStdTime(m.Timestamp))) + n1, err := types1.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.ID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Data != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Data.Size())) + n2, err := m.Data.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Metric) Size() (n int) { + var l int + _ = l + l = types1.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovMetrics(uint64(l)) + l = len(m.ID) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Data != nil { + l = m.Data.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} + +func sovMetrics(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozMetrics(x uint64) (n int) { + return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Metric) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Metric{`, + `Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "Any", "google_protobuf1.Any", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringMetrics(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Metric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types1.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Data == nil { + m.Data = &google_protobuf1.Any{} + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetrics(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthMetrics + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := 
int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMetrics(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/types/metrics.proto", fileDescriptorMetrics) +} + +var fileDescriptorMetrics = []byte{ + // 258 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96, + 0x14, 0x65, 0x26, 0x17, 0xeb, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0xd4, 0xe8, 0x81, + 0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x94, 0x64, + 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x98, 0x57, 0x09, + 0x95, 0x92, 0x47, 0x97, 0x2a, 0xc9, 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x80, 0x28, 0x50, + 0xea, 0x63, 0xe4, 0x62, 0xf3, 0x05, 0xdb, 0x2a, 0xe4, 0xc4, 0xc5, 0x09, 0x97, 0x95, 0x60, 0x54, + 0x60, 0xd4, 0xe0, 0x36, 0x92, 0xd2, 0x83, 0xe8, 0xd7, 0x83, 0xe9, 0xd7, 0x0b, 0x81, 0xa9, 0x70, + 0xe2, 0x38, 0x71, 0x4f, 0x9e, 0x61, 0xc2, 0x7d, 0x79, 0xc6, 0x20, 0x84, 0x36, 0x21, 0x31, 0x2e, + 0xa6, 0xcc, 0x14, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0x3c, + 0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc1, + 0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c, + 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, + 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x46, 0x30, 0x24, + 0xb1, 0x81, 0x6d, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xde, 0x0d, 0x02, 0xfe, 0x85, 0x01, + 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.proto b/vendor/github.com/containerd/containerd/api/types/metrics.proto new file mode 100644 index 000000000..0e631d2ac --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/metrics.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package containerd.types; + +import weak "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/containerd/containerd/api/types;types"; + +message Metric { + google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + string id = 2; + google.protobuf.Any data = 3; +} diff --git a/vendor/github.com/containerd/containerd/api/types/mount.pb.go b/vendor/github.com/containerd/containerd/api/types/mount.pb.go new file mode 100644 index 000000000..f7a9c3c1f --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/mount.pb.go @@ -0,0 +1,456 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: github.com/containerd/containerd/api/types/mount.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Mount describes mounts for a container. +// +// This type is the lingua franca of ContainerD. All services provide mounts +// to be used with the container at creation time. +// +// The Mount type follows the structure of the mount syscall, including a type, +// source, target and options. +type Mount struct { + // Type defines the nature of the mount. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + // Target path in container + Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + // Options specifies zero or more fstab style mount options. + Options []string `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorMount, []int{0} } + +func init() { + proto.RegisterType((*Mount)(nil), "containerd.types.Mount") +} +func (m *Mount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintMount(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Source) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintMount(dAtA, i, uint64(len(m.Source))) + i += copy(dAtA[i:], m.Source) + } + if len(m.Target) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintMount(dAtA, i, uint64(len(m.Target))) + i += copy(dAtA[i:], m.Target) + } + if len(m.Options) > 0 { + for _, s := range m.Options { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeVarintMount(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Mount) Size() (n int) { + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovMount(uint64(l)) + } + l = len(m.Source) + if l > 0 { + n += 1 + l + sovMount(uint64(l)) + } + l = len(m.Target) + if l > 0 { + n += 1 + l + sovMount(uint64(l)) + } + if len(m.Options) > 0 { + for _, s := range m.Options { + l = len(s) + n += 1 + l + sovMount(uint64(l)) + } + } + return n +} + +func sovMount(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozMount(x uint64) (n int) { + return sovMount(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Mount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mount{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Source:` + 
fmt.Sprintf("%v", this.Source) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `Options:` + fmt.Sprintf("%v", this.Options) + `,`, + `}`, + }, "") + return s +} +func valueToStringMount(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Mount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMount + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMount + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMount + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Target = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMount + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: 
+ iNdEx = preIndex + skippy, err := skipMount(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMount(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthMount + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMount(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthMount = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMount = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptorMount) +} + +var fileDescriptorMount = []byte{ + // 202 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97, + 0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5, + 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab, + 0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, + 0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85, + 0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 0x4f, 0x48, + 0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33, + 0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, + 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01, + 0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x11, 0x0c, 0x49, 
0x6c, 0x60, 0xb7, 0x1b, 0x03, 0x02, 0x00, 0x00, + 0xff, 0xff, 0x82, 0x1c, 0x02, 0x18, 0x1d, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/types/mount.proto b/vendor/github.com/containerd/containerd/api/types/mount.proto new file mode 100644 index 000000000..cd80e44a2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/mount.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package containerd.types; + +import weak "gogoproto/gogo.proto"; + +option go_package = "github.com/containerd/containerd/api/types;types"; + +// Mount describes mounts for a container. +// +// This type is the lingua franca of ContainerD. All services provide mounts +// to be used with the container at creation time. +// +// The Mount type follows the structure of the mount syscall, including a type, +// source, target and options. +message Mount { + // Type defines the nature of the mount. + string type = 1; + + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + string source = 2; + + // Target path in container + string target = 3; + + // Options specifies zero or more fstab style mount options. + repeated string options = 4; +} diff --git a/vendor/github.com/containerd/containerd/api/types/platform.pb.go b/vendor/github.com/containerd/containerd/api/types/platform.pb.go new file mode 100644 index 000000000..ba9a3bf88 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/platform.pb.go @@ -0,0 +1,394 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/containerd/api/types/platform.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Platform follows the structure of the OCI platform specification, from +// descriptors. 
+type Platform struct { + OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"` + Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"` + Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"` +} + +func (m *Platform) Reset() { *m = Platform{} } +func (*Platform) ProtoMessage() {} +func (*Platform) Descriptor() ([]byte, []int) { return fileDescriptorPlatform, []int{0} } + +func init() { + proto.RegisterType((*Platform)(nil), "containerd.types.Platform") +} +func (m *Platform) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Platform) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.OS) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlatform(dAtA, i, uint64(len(m.OS))) + i += copy(dAtA[i:], m.OS) + } + if len(m.Architecture) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlatform(dAtA, i, uint64(len(m.Architecture))) + i += copy(dAtA[i:], m.Architecture) + } + if len(m.Variant) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlatform(dAtA, i, uint64(len(m.Variant))) + i += copy(dAtA[i:], m.Variant) + } + return i, nil +} + +func encodeVarintPlatform(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Platform) Size() (n int) { + var l int + _ = l + l = len(m.OS) + if l > 0 { + n += 1 + l + sovPlatform(uint64(l)) + } + l = len(m.Architecture) + if l > 0 { + n += 1 + l + sovPlatform(uint64(l)) + } + l = len(m.Variant) + if l > 0 { + n += 1 + l + sovPlatform(uint64(l)) + } + return n +} + +func sovPlatform(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlatform(x uint64) (n int) { + return sovPlatform(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Platform) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Platform{`, + `OS:` + fmt.Sprintf("%v", this.OS) + `,`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `Variant:` + fmt.Sprintf("%v", this.Variant) + `,`, + `}`, + }, "") + return s +} +func valueToStringPlatform(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Platform) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlatform + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Platform: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlatform + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlatform + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OS = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlatform + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlatform + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlatform + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlatform + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Variant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlatform(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlatform + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlatform(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlatform + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlatform + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlatform + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlatform + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlatform + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlatform(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 
0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlatform = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlatform = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/types/platform.proto", fileDescriptorPlatform) +} + +var fileDescriptorPlatform = []byte{ + // 205 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24, + 0x96, 0xa4, 0xe5, 0x17, 0xe5, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x14, 0xe9, + 0x81, 0x15, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, 0x3a, 0xa5, + 0x04, 0x2e, 0x8e, 0x00, 0xa8, 0x4e, 0x21, 0x31, 0x2e, 0xa6, 0xfc, 0x62, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0xfc, 0x83, 0x83, 0x98, 0xf2, 0x8b, 0x85, 0x94, + 0xb8, 0x78, 0x12, 0x8b, 0x92, 0x33, 0x32, 0x4b, 0x52, 0x93, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x98, + 0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a, + 0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, + 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x8c, 0x60, 0x48, 0x62, 0x03, 0x3b, 0xdb, 0x18, + 0x10, 0x00, 0x00, 0xff, 0xff, 0x05, 0xaa, 0xda, 0xa1, 0x1b, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/types/platform.proto b/vendor/github.com/containerd/containerd/api/types/platform.proto new file mode 100644 index 000000000..4cf9834bd --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/platform.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package containerd.types; + +import weak "gogoproto/gogo.proto"; + +option go_package = "github.com/containerd/containerd/api/types;types"; + +// Platform follows the structure of the OCI platform specification, from +// descriptors. +message Platform { + string os = 1 [(gogoproto.customname) = "OS"]; + string architecture = 2; + string variant = 3; +} diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression.go b/vendor/github.com/containerd/containerd/archive/compression/compression.go index bd64e0353..60c80e98a 100644 --- a/vendor/github.com/containerd/containerd/archive/compression/compression.go +++ b/vendor/github.com/containerd/containerd/archive/compression/compression.go @@ -92,6 +92,36 @@ func (w *writeCloserWrapper) Close() error { return nil } +type bufferedReader struct { + buf *bufio.Reader +} + +func newBufferedReader(r io.Reader) *bufferedReader { + buf := bufioReader32KPool.Get().(*bufio.Reader) + buf.Reset(r) + return &bufferedReader{buf} +} + +func (r *bufferedReader) Read(p []byte) (n int, err error) { + if r.buf == nil { + return 0, io.EOF + } + n, err = r.buf.Read(p) + if err == io.EOF { + r.buf.Reset(nil) + bufioReader32KPool.Put(r.buf) + r.buf = nil + } + return +} + +func (r *bufferedReader) Peek(n int) ([]byte, error) { + if r.buf == nil { + return nil, io.EOF + } + return r.buf.Peek(n) +} + // DetectCompression detects the compression algorithm of the source. 
func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ @@ -110,8 +140,7 @@ func DetectCompression(source []byte) Compression { // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { - buf := bufioReader32KPool.Get().(*bufio.Reader) - buf.Reset(archive) + buf := newBufferedReader(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd @@ -123,15 +152,12 @@ func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { return nil, err } - closer := func() error { - buf.Reset(nil) - bufioReader32KPool.Put(buf) - return nil - } switch compression := DetectCompression(bs); compression { case Uncompressed: - readBufWrapper := &readCloserWrapper{buf, compression, closer} - return readBufWrapper, nil + return &readCloserWrapper{ + Reader: buf, + compression: compression, + }, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) gzReader, err := gzipDecompress(ctx, buf) @@ -140,12 +166,15 @@ func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { return nil, err } - readBufWrapper := &readCloserWrapper{gzReader, compression, func() error { - cancel() - return closer() - }} + return &readCloserWrapper{ + Reader: gzReader, + compression: compression, + closer: func() error { + cancel() + return gzReader.Close() + }, + }, nil - return readBufWrapper, nil default: return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) } diff --git a/vendor/github.com/containerd/containerd/archive/tar.go b/vendor/github.com/containerd/containerd/archive/tar.go index ac04e3e02..0d9e0e750 100644 --- a/vendor/github.com/containerd/containerd/archive/tar.go +++ b/vendor/github.com/containerd/containerd/archive/tar.go @@ -100,7 +100,7 @@ const ( // readdir calls to this directory do not follow to lower layers. whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq" - paxSchilyXattr = "SCHILY.xattrs." + paxSchilyXattr = "SCHILY.xattr." ) // Apply applies a tar stream of an OCI style diff tar. @@ -295,7 +295,7 @@ func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyO linkBasename := filepath.Base(hdr.Linkname) srcHdr = aufsHardlinks[linkBasename] if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") + return 0, fmt.Errorf("invalid aufs hardlink") } p, err := fs.RootPath(aufsTempdir, linkBasename) if err != nil { diff --git a/vendor/github.com/containerd/containerd/archive/tar_windows.go b/vendor/github.com/containerd/containerd/archive/tar_windows.go index a3f585ac8..b97631fcc 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_windows.go +++ b/vendor/github.com/containerd/containerd/archive/tar_windows.go @@ -74,7 +74,7 @@ func tarName(p string) (string, error) { // in file names, it is mostly safe to replace however we must // check just in case if strings.Contains(p, "/") { - return "", fmt.Errorf("Windows path contains forward slash: %s", p) + return "", fmt.Errorf("windows path contains forward slash: %s", p) } return strings.Replace(p, string(os.PathSeparator), "/", -1), nil @@ -130,11 +130,7 @@ func skipFile(hdr *tar.Header) bool { // specific or Linux-specific, this warning should be changed to an error // to cater for the situation where someone does manage to upload a Linux // image but have it tagged as Windows inadvertently. 
- if strings.Contains(hdr.Name, ":") { - return true - } - - return false + return strings.Contains(hdr.Name, ":") } // handleTarTypeBlockCharFifo is an OS-specific helper function used by diff --git a/vendor/github.com/containerd/containerd/archive/time_unix.go b/vendor/github.com/containerd/containerd/archive/time_unix.go index 4a69cb7d0..fd8d98bf3 100644 --- a/vendor/github.com/containerd/containerd/archive/time_unix.go +++ b/vendor/github.com/containerd/containerd/archive/time_unix.go @@ -1,4 +1,4 @@ -// +build linux freebsd solaris +// +build !windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/content/content.go b/vendor/github.com/containerd/containerd/content/content.go index aabf4c8f3..d8141a68b 100644 --- a/vendor/github.com/containerd/containerd/content/content.go +++ b/vendor/github.com/containerd/containerd/content/content.go @@ -110,8 +110,9 @@ type IngestManager interface { // Writer handles the write of content into a content store type Writer interface { - // Close is expected to be called after Commit() when commission is needed. - // Closing a writer without commit allows resuming or aborting. + // Close closes the writer, if the writer has not been + // committed this allows resuming or aborting. + // Calling Close on a closed writer will not error. io.WriteCloser // Digest may return empty digest or panics until committed. @@ -119,6 +120,8 @@ type Writer interface { // Commit commits the blob (but no roll-back is guaranteed on an error). // size and expected can be zero-value when unknown. + // Commit always closes the writer, even on error. + // ErrAlreadyExists aborts the writer. Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error // Status returns the current state of write diff --git a/vendor/github.com/containerd/containerd/content/local/locks.go b/vendor/github.com/containerd/containerd/content/local/locks.go index 411c29a9d..bc3bd18e0 100644 --- a/vendor/github.com/containerd/containerd/content/local/locks.go +++ b/vendor/github.com/containerd/containerd/content/local/locks.go @@ -47,7 +47,5 @@ func unlock(ref string) { locksMu.Lock() defer locksMu.Unlock() - if _, ok := locks[ref]; ok { - delete(locks, ref) - } + delete(locks, ref) } diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go index 7fa9bb736..996724057 100644 --- a/vendor/github.com/containerd/containerd/content/local/store.go +++ b/vendor/github.com/containerd/containerd/content/local/store.go @@ -524,12 +524,11 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di if err != nil { return nil, err } - defer fp.Close() p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) - offset, err = io.CopyBuffer(digester.Hash(), fp, *p) + bufPool.Put(p) + fp.Close() if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/containerd/content/local/writer.go b/vendor/github.com/containerd/containerd/content/local/writer.go index 10df4a4c5..223b14544 100644 --- a/vendor/github.com/containerd/containerd/content/local/writer.go +++ b/vendor/github.com/containerd/containerd/content/local/writer.go @@ -26,6 +26,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -80,43 +81,36 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, 
} } - if w.fp == nil { + // Ensure even on error the writer is fully closed + defer unlock(w.ref) + fp := w.fp + w.fp = nil + + if fp == nil { return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer") } - if err := w.fp.Sync(); err != nil { + if err := fp.Sync(); err != nil { + fp.Close() return errors.Wrap(err, "sync failed") } - fi, err := w.fp.Stat() + fi, err := fp.Stat() + closeErr := fp.Close() if err != nil { return errors.Wrap(err, "stat on ingest file failed") } - - // change to readonly, more important for read, but provides _some_ - // protection from this point on. We use the existing perms with a mask - // only allowing reads honoring the umask on creation. - // - // This removes write and exec, only allowing read per the creation umask. - // - // NOTE: Windows does not support this operation - if runtime.GOOS != "windows" { - if err := w.fp.Chmod((fi.Mode() & os.ModePerm) &^ 0333); err != nil { - return errors.Wrap(err, "failed to change ingest file permissions") - } + if closeErr != nil { + return errors.Wrap(err, "failed to close ingest file") } if size > 0 && size != fi.Size() { - return errors.Errorf("unexpected commit size %d, expected %d", fi.Size(), size) - } - - if err := w.fp.Close(); err != nil { - return errors.Wrap(err, "failed closing ingest") + return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fi.Size(), size) } dgst := w.digester.Digest() if expected != "" && expected != dgst { - return errors.Errorf("unexpected commit digest %s, expected %s", dgst, expected) + return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected) } var ( @@ -129,27 +123,48 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, return err } - // clean up!! - defer os.RemoveAll(w.path) - if _, err := os.Stat(target); err == nil { // collision with the target file! + if err := os.RemoveAll(w.path); err != nil { + log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory") + } return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst) } + if err := os.Rename(ingest, target); err != nil { return err } + + // Ingest has now been made available in the content store, attempt to complete + // setting metadata but errors should only be logged and not returned since + // the content store cannot be cleanly rolled back. + commitTime := time.Now() if err := os.Chtimes(target, commitTime, commitTime); err != nil { - return err + log.G(ctx).WithField("digest", dgst).Errorf("failed to change file time to commit time") } - w.fp = nil - unlock(w.ref) + // clean up!! + if err := os.RemoveAll(w.path); err != nil { + log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory") + } if w.s.ls != nil && base.Labels != nil { if err := w.s.ls.Set(dgst, base.Labels); err != nil { - return err + log.G(ctx).WithField("digest", dgst).Errorf("failed to set labels") + } + } + + // change to readonly, more important for read, but provides _some_ + // protection from this point on. We use the existing perms with a mask + // only allowing reads honoring the umask on creation. + // + // This removes write and exec, only allowing read per the creation umask. 
+ // + // NOTE: Windows does not support this operation + if runtime.GOOS != "windows" { + if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil { + log.G(ctx).WithField("ref", w.ref).Errorf("failed to make readonly") } } diff --git a/vendor/github.com/containerd/containerd/content/proxy/content_reader.go b/vendor/github.com/containerd/containerd/content/proxy/content_reader.go new file mode 100644 index 000000000..b06e48fa9 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/proxy/content_reader.go @@ -0,0 +1,65 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package proxy + +import ( + "context" + + contentapi "github.com/containerd/containerd/api/services/content/v1" + digest "github.com/opencontainers/go-digest" +) + +type remoteReaderAt struct { + ctx context.Context + digest digest.Digest + size int64 + client contentapi.ContentClient +} + +func (ra *remoteReaderAt) Size() int64 { + return ra.size +} + +func (ra *remoteReaderAt) ReadAt(p []byte, off int64) (n int, err error) { + rr := &contentapi.ReadContentRequest{ + Digest: ra.digest, + Offset: off, + Size_: int64(len(p)), + } + rc, err := ra.client.Read(ra.ctx, rr) + if err != nil { + return 0, err + } + + for len(p) > 0 { + var resp *contentapi.ReadContentResponse + // fill our buffer up until we can fill p. + resp, err = rc.Recv() + if err != nil { + return n, err + } + + copied := copy(p, resp.Data) + n += copied + p = p[copied:] + } + return n, nil +} + +func (ra *remoteReaderAt) Close() error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/content/proxy/content_store.go b/vendor/github.com/containerd/containerd/content/proxy/content_store.go new file mode 100644 index 000000000..217b74651 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/proxy/content_store.go @@ -0,0 +1,234 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package proxy + +import ( + "context" + "io" + + contentapi "github.com/containerd/containerd/api/services/content/v1" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + protobuftypes "github.com/gogo/protobuf/types" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type proxyContentStore struct { + client contentapi.ContentClient +} + +// NewContentStore returns a new content store which communicates over a GRPC +// connection using the containerd content GRPC API. 
+func NewContentStore(client contentapi.ContentClient) content.Store { + return &proxyContentStore{ + client: client, + } +} + +func (pcs *proxyContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + resp, err := pcs.client.Info(ctx, &contentapi.InfoRequest{ + Digest: dgst, + }) + if err != nil { + return content.Info{}, errdefs.FromGRPC(err) + } + + return infoFromGRPC(resp.Info), nil +} + +func (pcs *proxyContentStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { + session, err := pcs.client.List(ctx, &contentapi.ListContentRequest{ + Filters: filters, + }) + if err != nil { + return errdefs.FromGRPC(err) + } + + for { + msg, err := session.Recv() + if err != nil { + if err != io.EOF { + return errdefs.FromGRPC(err) + } + + break + } + + for _, info := range msg.Info { + if err := fn(infoFromGRPC(info)); err != nil { + return err + } + } + } + + return nil +} + +func (pcs *proxyContentStore) Delete(ctx context.Context, dgst digest.Digest) error { + if _, err := pcs.client.Delete(ctx, &contentapi.DeleteContentRequest{ + Digest: dgst, + }); err != nil { + return errdefs.FromGRPC(err) + } + + return nil +} + +// ReaderAt ignores MediaType. +func (pcs *proxyContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + i, err := pcs.Info(ctx, desc.Digest) + if err != nil { + return nil, err + } + + return &remoteReaderAt{ + ctx: ctx, + digest: desc.Digest, + size: i.Size, + client: pcs.client, + }, nil +} + +func (pcs *proxyContentStore) Status(ctx context.Context, ref string) (content.Status, error) { + resp, err := pcs.client.Status(ctx, &contentapi.StatusRequest{ + Ref: ref, + }) + if err != nil { + return content.Status{}, errdefs.FromGRPC(err) + } + + status := resp.Status + return content.Status{ + Ref: status.Ref, + StartedAt: status.StartedAt, + UpdatedAt: status.UpdatedAt, + Offset: status.Offset, + Total: status.Total, + Expected: status.Expected, + }, nil +} + +func (pcs *proxyContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + resp, err := pcs.client.Update(ctx, &contentapi.UpdateRequest{ + Info: infoToGRPC(info), + UpdateMask: &protobuftypes.FieldMask{ + Paths: fieldpaths, + }, + }) + if err != nil { + return content.Info{}, errdefs.FromGRPC(err) + } + return infoFromGRPC(resp.Info), nil +} + +func (pcs *proxyContentStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { + resp, err := pcs.client.ListStatuses(ctx, &contentapi.ListStatusesRequest{ + Filters: filters, + }) + if err != nil { + return nil, errdefs.FromGRPC(err) + } + + var statuses []content.Status + for _, status := range resp.Statuses { + statuses = append(statuses, content.Status{ + Ref: status.Ref, + StartedAt: status.StartedAt, + UpdatedAt: status.UpdatedAt, + Offset: status.Offset, + Total: status.Total, + Expected: status.Expected, + }) + } + + return statuses, nil +} + +// Writer ignores MediaType. +func (pcs *proxyContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wOpts content.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + wrclient, offset, err := pcs.negotiate(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest) + if err != nil { + return nil, errdefs.FromGRPC(err) + } + + return &remoteWriter{ + ref: wOpts.Ref, + client: wrclient, + offset: offset, + }, nil +} + +// Abort implements asynchronous abort. 
It starts a new write session on the ref l +func (pcs *proxyContentStore) Abort(ctx context.Context, ref string) error { + if _, err := pcs.client.Abort(ctx, &contentapi.AbortRequest{ + Ref: ref, + }); err != nil { + return errdefs.FromGRPC(err) + } + + return nil +} + +func (pcs *proxyContentStore) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) { + wrclient, err := pcs.client.Write(ctx) + if err != nil { + return nil, 0, err + } + + if err := wrclient.Send(&contentapi.WriteContentRequest{ + Action: contentapi.WriteActionStat, + Ref: ref, + Total: size, + Expected: expected, + }); err != nil { + return nil, 0, err + } + + resp, err := wrclient.Recv() + if err != nil { + return nil, 0, err + } + + return wrclient, resp.Offset, nil +} + +func infoToGRPC(info content.Info) contentapi.Info { + return contentapi.Info{ + Digest: info.Digest, + Size_: info.Size, + CreatedAt: info.CreatedAt, + UpdatedAt: info.UpdatedAt, + Labels: info.Labels, + } +} + +func infoFromGRPC(info contentapi.Info) content.Info { + return content.Info{ + Digest: info.Digest, + Size: info.Size_, + CreatedAt: info.CreatedAt, + UpdatedAt: info.UpdatedAt, + Labels: info.Labels, + } +} diff --git a/vendor/github.com/containerd/containerd/content/proxy/content_writer.go b/vendor/github.com/containerd/containerd/content/proxy/content_writer.go new file mode 100644 index 000000000..5434a1568 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/proxy/content_writer.go @@ -0,0 +1,139 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package proxy + +import ( + "context" + "io" + + contentapi "github.com/containerd/containerd/api/services/content/v1" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type remoteWriter struct { + ref string + client contentapi.Content_WriteClient + offset int64 + digest digest.Digest +} + +// send performs a synchronous req-resp cycle on the client. 
+func (rw *remoteWriter) send(req *contentapi.WriteContentRequest) (*contentapi.WriteContentResponse, error) { + if err := rw.client.Send(req); err != nil { + return nil, err + } + + resp, err := rw.client.Recv() + + if err == nil { + // try to keep these in sync + if resp.Digest != "" { + rw.digest = resp.Digest + } + } + + return resp, err +} + +func (rw *remoteWriter) Status() (content.Status, error) { + resp, err := rw.send(&contentapi.WriteContentRequest{ + Action: contentapi.WriteActionStat, + }) + if err != nil { + return content.Status{}, errors.Wrap(errdefs.FromGRPC(err), "error getting writer status") + } + + return content.Status{ + Ref: rw.ref, + Offset: resp.Offset, + Total: resp.Total, + StartedAt: resp.StartedAt, + UpdatedAt: resp.UpdatedAt, + }, nil +} + +func (rw *remoteWriter) Digest() digest.Digest { + return rw.digest +} + +func (rw *remoteWriter) Write(p []byte) (n int, err error) { + offset := rw.offset + + resp, err := rw.send(&contentapi.WriteContentRequest{ + Action: contentapi.WriteActionWrite, + Offset: offset, + Data: p, + }) + if err != nil { + return 0, errors.Wrap(errdefs.FromGRPC(err), "failed to send write") + } + + n = int(resp.Offset - offset) + if n < len(p) { + err = io.ErrShortWrite + } + + rw.offset += int64(n) + if resp.Digest != "" { + rw.digest = resp.Digest + } + return +} + +func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + var base content.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return err + } + } + resp, err := rw.send(&contentapi.WriteContentRequest{ + Action: contentapi.WriteActionCommit, + Total: size, + Offset: rw.offset, + Expected: expected, + Labels: base.Labels, + }) + if err != nil { + return errors.Wrap(errdefs.FromGRPC(err), "commit failed") + } + + if size != 0 && resp.Offset != size { + return errors.Errorf("unexpected size: %v != %v", resp.Offset, size) + } + + if expected != "" && resp.Digest != expected { + return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected) + } + + rw.digest = resp.Digest + rw.offset = resp.Offset + return nil +} + +func (rw *remoteWriter) Truncate(size int64) error { + // This truncation won't actually be validated until a write is issued. 
+ rw.offset = size + return nil +} + +func (rw *remoteWriter) Close() error { + return rw.client.CloseSend() +} diff --git a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go index 4619681f4..275a4c3e6 100644 --- a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go +++ b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go @@ -37,10 +37,10 @@ func WithProfile(profile string) oci.SpecOpts { s.Linux.Seccomp = &specs.LinuxSeccomp{} f, err := ioutil.ReadFile(profile) if err != nil { - return fmt.Errorf("Cannot load seccomp profile %q: %v", profile, err) + return fmt.Errorf("cannot load seccomp profile %q: %v", profile, err) } if err := json.Unmarshal(f, s.Linux.Seccomp); err != nil { - return fmt.Errorf("Decoding seccomp profile failed %q: %v", profile, err) + return fmt.Errorf("decoding seccomp profile failed %q: %v", profile, err) } return nil } diff --git a/vendor/github.com/containerd/containerd/diff/apply/apply.go b/vendor/github.com/containerd/containerd/diff/apply/apply.go index e5e77cbdf..d5b4ff45d 100644 --- a/vendor/github.com/containerd/containerd/diff/apply/apply.go +++ b/vendor/github.com/containerd/containerd/diff/apply/apply.go @@ -58,7 +58,7 @@ func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts [ defer func() { if err == nil { log.G(ctx).WithFields(logrus.Fields{ - "d": time.Now().Sub(t1), + "d": time.Since(t1), "dgst": desc.Digest, "size": desc.Size, "media": desc.MediaType, diff --git a/vendor/github.com/containerd/containerd/diff/walking/differ.go b/vendor/github.com/containerd/containerd/diff/walking/differ.go index f06aa016f..a45a5630b 100644 --- a/vendor/github.com/containerd/containerd/diff/walking/differ.go +++ b/vendor/github.com/containerd/containerd/diff/walking/differ.go @@ -41,6 +41,7 @@ type walkingDiff struct { } var emptyDesc = ocispec.Descriptor{} +var uncompressed = "containerd.io/uncompressed" // NewWalkingDiff is a generic implementation of diff.Comparer. 
The diff is // calculated by mounting both the upper and lower mount sets and walking the @@ -125,7 +126,7 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o if config.Labels == nil { config.Labels = map[string]string{} } - config.Labels["containerd.io/uncompressed"] = dgstr.Digest().String() + config.Labels[uncompressed] = dgstr.Digest().String() } else { if err = archive.WriteDiff(ctx, cw, lowerRoot, upperRoot); err != nil { return errors.Wrap(err, "failed to write diff") @@ -149,6 +150,14 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o return errors.Wrap(err, "failed to get info from content store") } + // Set uncompressed label if digest already existed without label + if _, ok := info.Labels[uncompressed]; !ok { + info.Labels[uncompressed] = config.Labels[uncompressed] + if _, err := s.store.Update(ctx, info, "labels."+uncompressed); err != nil { + return errors.Wrap(err, "error setting uncompressed label") + } + } + ocidesc = ocispec.Descriptor{ MediaType: config.MediaType, Size: info.Size, diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go index 4eab03ab8..b1542f13d 100644 --- a/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -95,7 +95,7 @@ func FromGRPC(err error) error { msg := rebaseMessage(cls, err) if msg != "" { - err = errors.Wrapf(cls, msg) + err = errors.Wrap(cls, msg) } else { err = errors.WithStack(cls) } diff --git a/vendor/github.com/containerd/containerd/events/exchange/exchange.go b/vendor/github.com/containerd/containerd/events/exchange/exchange.go index 95d21b7df..39972d74b 100644 --- a/vendor/github.com/containerd/containerd/events/exchange/exchange.go +++ b/vendor/github.com/containerd/containerd/events/exchange/exchange.go @@ -138,10 +138,10 @@ func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *even ) closeAll := func() { - defer close(errq) - defer e.broadcaster.Remove(dst) - defer queue.Close() - defer channel.Close() + channel.Close() + queue.Close() + e.broadcaster.Remove(dst) + close(errq) } ch = evch diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go index 9dced523b..2be23574e 100644 --- a/vendor/github.com/containerd/containerd/filters/parser.go +++ b/vendor/github.com/containerd/containerd/filters/parser.go @@ -71,7 +71,7 @@ func ParseAll(ss ...string) (Filter, error) { for _, s := range ss { f, err := Parse(s) if err != nil { - return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error()) + return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) } fs = append(fs, f) diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go index c3961962c..45c52606d 100644 --- a/vendor/github.com/containerd/containerd/filters/scanner.go +++ b/vendor/github.com/containerd/containerd/filters/scanner.go @@ -185,7 +185,6 @@ func (s *scanner) scanQuoted(quote rune) { ch = s.next() } } - return } func (s *scanner) scanEscape(quote rune) rune { diff --git a/vendor/github.com/containerd/containerd/identifiers/validate.go b/vendor/github.com/containerd/containerd/identifiers/validate.go index c58513c02..c0dd820ff 100644 --- a/vendor/github.com/containerd/containerd/identifiers/validate.go +++ b/vendor/github.com/containerd/containerd/identifiers/validate.go @@ -45,7 +45,7 @@ 
var ( // Validate return nil if the string s is a valid identifier. // // identifiers must be valid domain names according to RFC 1035, section 2.3.1. To -// enforce case insensitvity, all characters must be lower case. +// enforce case insensitivity, all characters must be lower case. // // In general, identifiers that pass this validation, should be safe for use as // a domain names or filesystem path component. diff --git a/vendor/github.com/containerd/containerd/images/handlers.go b/vendor/github.com/containerd/containerd/images/handlers.go index 230a9caf8..dac701bb8 100644 --- a/vendor/github.com/containerd/containerd/images/handlers.go +++ b/vendor/github.com/containerd/containerd/images/handlers.go @@ -26,6 +26,7 @@ import ( ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" ) var ( @@ -108,19 +109,30 @@ func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) err // handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse // any children. // +// A concurrency limiter can be passed in to limit the number of concurrent +// handlers running. When limiter is nil, there is no limit. +// // Typically, this function will be used with `FetchHandler`, often composed // with other handlers. // // If any handler returns an error, the dispatch session will be canceled. -func Dispatch(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error { +func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error { eg, ctx := errgroup.WithContext(ctx) for _, desc := range descs { desc := desc + if limiter != nil { + if err := limiter.Acquire(ctx, 1); err != nil { + return err + } + } eg.Go(func() error { desc := desc children, err := handler.Handle(ctx, desc) + if limiter != nil { + limiter.Release(1) + } if err != nil { if errors.Cause(err) == ErrSkipDesc { return nil // don't traverse the children. @@ -129,7 +141,7 @@ func Dispatch(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) } if len(children) > 0 { - return Dispatch(ctx, handler, children...) + return Dispatch(ctx, handler, limiter, children...) } return nil diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go index 4d6979d7a..f72684d82 100644 --- a/vendor/github.com/containerd/containerd/images/image.go +++ b/vendor/github.com/containerd/containerd/images/image.go @@ -129,6 +129,13 @@ type platformManifest struct { // Manifest resolves a manifest from the image for the given platform. // +// When a manifest descriptor inside of a manifest index does not have +// a platform defined, the platform from the image config is considered. +// +// If the descriptor points to a non-index manifest, then the manifest is +// unmarshalled and returned without considering the platform inside of the +// config. +// // TODO(stevvooe): This violates the current platform agnostic approach to this // package by returning a specific manifest type. 
We'll need to refactor this // to return a manifest descriptor or decide that we want to bring the API in @@ -152,7 +159,7 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc return nil, err } - if platform != nil { + if desc.Digest != image.Digest && platform != nil { if desc.Platform != nil && !platform.Match(*desc.Platform) { return nil, nil } diff --git a/vendor/github.com/containerd/containerd/images/mediatypes.go b/vendor/github.com/containerd/containerd/images/mediatypes.go index ca4ca071b..186a3b673 100644 --- a/vendor/github.com/containerd/containerd/images/mediatypes.go +++ b/vendor/github.com/containerd/containerd/images/mediatypes.go @@ -29,11 +29,14 @@ const ( MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json" MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" // Checkpoint/Restore Media Types - MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar" - MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar" - MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar" - MediaTypeContainerd1RW = "application/vnd.containerd.container.rw.tar" - MediaTypeContainerd1CheckpointConfig = "application/vnd.containerd.container.checkpoint.config.v1+proto" + MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar" + MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar" + MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar" + MediaTypeContainerd1RW = "application/vnd.containerd.container.rw.tar" + MediaTypeContainerd1CheckpointConfig = "application/vnd.containerd.container.checkpoint.config.v1+proto" + MediaTypeContainerd1CheckpointOptions = "application/vnd.containerd.container.checkpoint.options.v1+proto" + MediaTypeContainerd1CheckpointRuntimeName = "application/vnd.containerd.container.checkpoint.runtime.name" + MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto" // Legacy Docker schema1 manifest MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" ) diff --git a/vendor/github.com/containerd/containerd/images/oci/exporter.go b/vendor/github.com/containerd/containerd/images/oci/exporter.go index bf5751e6a..8bb535489 100644 --- a/vendor/github.com/containerd/containerd/images/oci/exporter.go +++ b/vendor/github.com/containerd/containerd/images/oci/exporter.go @@ -25,6 +25,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" ocispecs "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -37,6 +38,36 @@ import ( // e.g. 
application/vnd.docker.image.rootfs.diff.tar.gzip // -> application/vnd.oci.image.layer.v1.tar+gzip type V1Exporter struct { + AllPlatforms bool +} + +// V1ExporterOpt allows the caller to set additional options to a new V1Exporter +type V1ExporterOpt func(c *V1Exporter) error + +// DefaultV1Exporter return a default V1Exporter pointer +func DefaultV1Exporter() *V1Exporter { + return &V1Exporter{ + AllPlatforms: false, + } +} + +// ResolveV1ExportOpt return a new V1Exporter with V1ExporterOpt +func ResolveV1ExportOpt(opts ...V1ExporterOpt) (*V1Exporter, error) { + exporter := DefaultV1Exporter() + for _, o := range opts { + if err := o(exporter); err != nil { + return exporter, err + } + } + return exporter, nil +} + +// WithAllPlatforms set V1Exporter`s AllPlatforms option +func WithAllPlatforms(allPlatforms bool) V1ExporterOpt { + return func(c *V1Exporter) error { + c.AllPlatforms = allPlatforms + return nil + } } // Export implements Exporter. @@ -56,8 +87,15 @@ func (oe *V1Exporter) Export(ctx context.Context, store content.Provider, desc o return nil, nil } + childrenHandler := images.ChildrenHandler(store) + + if !oe.AllPlatforms { + // get local default platform to fetch image manifest + childrenHandler = images.FilterPlatforms(childrenHandler, platforms.Any(platforms.DefaultSpec())) + } + handlers := images.Handlers( - images.ChildrenHandler(store), + childrenHandler, images.HandlerFunc(exportHandler), ) diff --git a/vendor/github.com/containerd/containerd/metadata/buckets.go b/vendor/github.com/containerd/containerd/metadata/buckets.go index 51d40a7f3..fa947fb25 100644 --- a/vendor/github.com/containerd/containerd/metadata/buckets.go +++ b/vendor/github.com/containerd/containerd/metadata/buckets.go @@ -14,13 +14,11 @@ limitations under the License. */ -package metadata - -import ( - digest "github.com/opencontainers/go-digest" - bolt "go.etcd.io/bbolt" -) - +// Package metadata stores all labels and object specific metadata by namespace. +// This package also contains the main garbage collection logic for cleaning up +// resources consistently and atomically. Resources used by backends will be +// tracked in the metadata store to be exposed to consumers of this package. +// // The layout where a "/" delineates a bucket is described in the following // section. Please try to follow this as closely as possible when adding // functionality. We can bolster this with helpers and more structure if that @@ -43,6 +41,84 @@ import ( // // key: object-specific key identifying the storage bucket for the objects // contents. +// +// Below is the current database schema. This should be updated each time +// the structure is changed in addition to adding a migration and incrementing +// the database version. Note that `╘══*...*` refers to maps with arbitrary +// keys. 
+// ├──version : - Latest version, see migrations +// └──v1 - Schema version bucket +// ╘══*namespace* +// ├──labels +// │  ╘══*key* : - Label value +// ├──image +// │  ╘══*image name* +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │   ├──target +// │   │  ├──digest : - Descriptor digest +// │   │  ├──mediatype : - Descriptor media type +// │   │  └──size : - Descriptor size +// │   └──labels +// │   ╘══*key* : - Label value +// ├──containers +// │  ╘══*container id* +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │   ├──spec : - Proto marshaled spec +// │   ├──image : - Image name +// │   ├──snapshotter : - Snapshotter name +// │   ├──snapshotKey : - Snapshot key +// │   ├──runtime +// │   │  ├──name : - Runtime name +// │   │  ├──extensions +// │   │  │  ╘══*name* : - Proto marshaled extension +// │   │  └──options : - Proto marshaled options +// │   └──labels +// │   ╘══*key* : - Label value +// ├──snapshots +// │  ╘══*snapshotter* +// │   ╘══*snapshot key* +// │    ├──name : - Snapshot name in backend +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │    ├──parent : - Parent snapshot name +// │   ├──children +// │   │  ╘══*snapshot key* : - Child snapshot reference +// │   └──labels +// │   ╘══*key* : - Label value +// ├──content +// │  ├──blob +// │  │ ╘══*blob digest* +// │  │ ├──createdat : - Created at +// │  │ ├──updatedat : - Updated at +// │  │   ├──size : - Blob size +// │  │ └──labels +// │  │ ╘══*key* : - Label value +// │  └──ingests +// │   ╘══*ingest reference* +// │    ├──ref : - Ingest reference in backend +// │   ├──expireat : - Time to expire ingest +// │   └──expected : - Expected commit digest +// └──leases +// ╘══*lease id* +//   ├──createdat : - Created at +// ├──labels +// │ ╘══*key* : - Label value +//   ├──snapshots +// │  ╘══*snapshotter* +// │   ╘══*snapshot key* : - Snapshot reference +//   ├──content +// │  ╘══*blob digest* : - Content blob reference +// └──ingests +//   ╘══*ingest reference* : - Content ingest reference +package metadata + +import ( + digest "github.com/opencontainers/go-digest" + bolt "go.etcd.io/bbolt" +) + var ( bucketKeyVersion = []byte(schemaVersion) bucketKeyDBVersion = []byte("version") // stores the version of the schema diff --git a/vendor/github.com/containerd/containerd/metadata/containers.go b/vendor/github.com/containerd/containerd/metadata/containers.go index 6e5622c36..af8224786 100644 --- a/vendor/github.com/containerd/containerd/metadata/containers.go +++ b/vendor/github.com/containerd/containerd/metadata/containers.go @@ -72,7 +72,7 @@ func (s *containerStore) List(ctx context.Context, fs ...string) ([]containers.C filter, err := filters.ParseAll(fs...) if err != nil { - return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error()) + return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) } bkt := getContainersBucket(s.tx, namespace) diff --git a/vendor/github.com/containerd/containerd/metadata/content.go b/vendor/github.com/containerd/containerd/metadata/content.go index ac4440060..4d244914d 100644 --- a/vendor/github.com/containerd/containerd/metadata/content.go +++ b/vendor/github.com/containerd/containerd/metadata/content.go @@ -38,16 +38,31 @@ import ( type contentStore struct { content.Store - db *DB - l sync.RWMutex + db *DB + shared bool + l sync.RWMutex } // newContentStore returns a namespaced content store using an existing // content store interface. 
-func newContentStore(db *DB, cs content.Store) *contentStore { +// policy defines the sharing behavior for content between namespaces. Both +// modes will result in shared storage in the backend for committed. Choose +// "shared" to prevent separate namespaces from having to pull the same content +// twice. Choose "isolated" if the content must not be shared between +// namespaces. +// +// If the policy is "shared", writes will try to resolve the "expected" digest +// against the backend, allowing imports of content from other namespaces. In +// "isolated" mode, the client must prove they have the content by providing +// the entire blob before the content can be added to another namespace. +// +// Since we have only two policies right now, it's simpler using bool to +// represent it internally. +func newContentStore(db *DB, shared bool, cs content.Store) *contentStore { return &contentStore{ - Store: cs, - db: db, + Store: cs, + db: db, + shared: shared, } } @@ -383,13 +398,15 @@ func (cs *contentStore) Writer(ctx context.Context, opts ...content.WriterOpt) ( return nil } - if st, err := cs.Store.Info(ctx, wOpts.Desc.Digest); err == nil { - // Ensure the expected size is the same, it is likely - // an error if the size is mismatched but the caller - // must resolve this on commit - if wOpts.Desc.Size == 0 || wOpts.Desc.Size == st.Size { - shared = true - wOpts.Desc.Size = st.Size + if cs.shared { + if st, err := cs.Store.Info(ctx, wOpts.Desc.Digest); err == nil { + // Ensure the expected size is the same, it is likely + // an error if the size is mismatched but the caller + // must resolve this on commit + if wOpts.Desc.Size == 0 || wOpts.Desc.Size == st.Size { + shared = true + wOpts.Desc.Size = st.Size + } } } } @@ -553,52 +570,69 @@ func (nw *namespacedWriter) Commit(ctx context.Context, size int64, expected dig nw.l.RLock() defer nw.l.RUnlock() - return update(ctx, nw.db, func(tx *bolt.Tx) error { + var innerErr error + + if err := update(ctx, nw.db, func(tx *bolt.Tx) error { + dgst, err := nw.commit(ctx, tx, size, expected, opts...) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return err + } + innerErr = err + } bkt := getIngestsBucket(tx, nw.namespace) if bkt != nil { if err := bkt.DeleteBucket([]byte(nw.ref)); err != nil && err != bolt.ErrBucketNotFound { return err } } - dgst, err := nw.commit(ctx, tx, size, expected, opts...) 
- if err != nil { - return err - } if err := removeIngestLease(ctx, tx, nw.ref); err != nil { return err } return addContentLease(ctx, tx, dgst) - }) + }); err != nil { + return err + } + + return innerErr } func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64, expected digest.Digest, opts ...content.Opt) (digest.Digest, error) { var base content.Info for _, opt := range opts { if err := opt(&base); err != nil { + if nw.w != nil { + nw.w.Close() + } return "", err } } if err := validateInfo(&base); err != nil { + if nw.w != nil { + nw.w.Close() + } return "", err } var actual digest.Digest if nw.w == nil { if size != 0 && size != nw.desc.Size { - return "", errors.Errorf("%q failed size validation: %v != %v", nw.ref, nw.desc.Size, size) + return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q failed size validation: %v != %v", nw.ref, nw.desc.Size, size) } if expected != "" && expected != nw.desc.Digest { - return "", errors.Errorf("%q unexpected digest", nw.ref) + return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q unexpected digest", nw.ref) } size = nw.desc.Size actual = nw.desc.Digest } else { status, err := nw.w.Status() if err != nil { + nw.w.Close() return "", err } if size != 0 && size != status.Offset { - return "", errors.Errorf("%q failed size validation: %v != %v", nw.ref, status.Offset, size) + nw.w.Close() + return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q failed size validation: %v != %v", nw.ref, status.Offset, size) } size = status.Offset actual = nw.w.Digest() @@ -611,7 +645,7 @@ func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64, bkt, err := createBlobBucket(tx, nw.namespace, actual) if err != nil { if err == bolt.ErrBucketExists { - return "", errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", actual) + return actual, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", actual) } return "", err } @@ -745,7 +779,7 @@ func (cs *contentStore) garbageCollect(ctx context.Context) (d time.Duration, er t1 := time.Now() defer func() { if err == nil { - d = time.Now().Sub(t1) + d = time.Since(t1) } cs.l.Unlock() }() diff --git a/vendor/github.com/containerd/containerd/metadata/db.go b/vendor/github.com/containerd/containerd/metadata/db.go index 507d6d22d..7f1b27b38 100644 --- a/vendor/github.com/containerd/containerd/metadata/db.go +++ b/vendor/github.com/containerd/containerd/metadata/db.go @@ -46,6 +46,19 @@ const ( dbVersion = 3 ) +// DBOpt configures how we set up the DB +type DBOpt func(*dbOptions) + +// WithPolicyIsolated isolates contents between namespaces +func WithPolicyIsolated(o *dbOptions) { + o.shared = false +} + +// dbOptions configure db options. +type dbOptions struct { + shared bool +} + // DB represents a metadata database backed by a bolt // database. The database is fully namespaced and stores // image, container, namespace, snapshot, and content data @@ -72,19 +85,28 @@ type DB struct { // mutationCallbacks are called after each mutation with the flag // set indicating whether any dirty flags are set mutationCallbacks []func(bool) + + dbopts dbOptions } // NewDB creates a new metadata database using the provided // bolt database, content store, and snapshotters. 
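For orientation, a minimal sketch (not part of the vendored patch) of constructing this metadata DB with the new WithPolicyIsolated option. The on-disk paths and the choice of the local content store as backend are assumptions:

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd/content/local"
	"github.com/containerd/containerd/metadata"
	bolt "go.etcd.io/bbolt"
)

func main() {
	// Backend blob store; the path is an assumption.
	cs, err := local.NewStore("/var/lib/example/content")
	if err != nil {
		log.Fatal(err)
	}

	// Bolt database that holds the namespaced metadata.
	bdb, err := bolt.Open("/var/lib/example/meta.db", 0644, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer bdb.Close()

	// No snapshotters in this sketch; WithPolicyIsolated keeps committed
	// blobs from being shared across namespaces.
	db := metadata.NewDB(bdb, cs, nil, metadata.WithPolicyIsolated)
	if err := db.Init(context.Background()); err != nil {
		log.Fatal(err)
	}

	// The DB now exposes a namespaced, isolated content.Store.
	_ = db.ContentStore()
}
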
-func NewDB(db *bolt.DB, cs content.Store, ss map[string]snapshots.Snapshotter) *DB { +func NewDB(db *bolt.DB, cs content.Store, ss map[string]snapshots.Snapshotter, opts ...DBOpt) *DB { m := &DB{ db: db, ss: make(map[string]*snapshotter, len(ss)), dirtySS: map[string]struct{}{}, + dbopts: dbOptions{ + shared: true, + }, + } + + for _, opt := range opts { + opt(&m.dbopts) } // Initialize data stores - m.cs = newContentStore(m, cs) + m.cs = newContentStore(m, m.dbopts.shared, cs) for name, sn := range ss { m.ss[name] = newSnapshotter(m, name, sn) } @@ -154,7 +176,7 @@ func (m *DB) Init(ctx context.Context) error { if err := m.migrate(tx); err != nil { return errors.Wrapf(err, "failed to migrate to %s.%d", m.schema, m.version) } - log.G(ctx).WithField("d", time.Now().Sub(t0)).Debugf("finished database migration to %s.%d", m.schema, m.version) + log.G(ctx).WithField("d", time.Since(t0)).Debugf("finished database migration to %s.%d", m.schema, m.version) } } @@ -306,7 +328,7 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { m.cleanupSnapshotter(snapshotterName) sl.Lock() - stats.SnapshotD[snapshotterName] = time.Now().Sub(st1) + stats.SnapshotD[snapshotterName] = time.Since(st1) sl.Unlock() wg.Done() @@ -321,7 +343,7 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { go func() { ct1 := time.Now() m.cleanupContent() - stats.ContentD = time.Now().Sub(ct1) + stats.ContentD = time.Since(ct1) wg.Done() }() m.dirtyCS = false @@ -329,7 +351,7 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { m.dirtyL.Unlock() - stats.MetaD = time.Now().Sub(t1) + stats.MetaD = time.Since(t1) m.wlock.Unlock() wg.Wait() diff --git a/vendor/github.com/containerd/containerd/metadata/gc.go b/vendor/github.com/containerd/containerd/metadata/gc.go index a670ce1a3..99503fad7 100644 --- a/vendor/github.com/containerd/containerd/metadata/gc.go +++ b/vendor/github.com/containerd/containerd/metadata/gc.go @@ -296,10 +296,6 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectSnapshots, []byte(ss), []byte(name)) if bkt == nil { - getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectSnapshots).ForEach(func(k, v []byte) error { - return nil - }) - // Node may be created from dead edge return nil } diff --git a/vendor/github.com/containerd/containerd/metadata/images.go b/vendor/github.com/containerd/containerd/metadata/images.go index 6ae9dc855..762f6fb1a 100644 --- a/vendor/github.com/containerd/containerd/metadata/images.go +++ b/vendor/github.com/containerd/containerd/metadata/images.go @@ -84,7 +84,7 @@ func (s *imageStore) List(ctx context.Context, fs ...string) ([]images.Image, er filter, err := filters.ParseAll(fs...) if err != nil { - return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error()) + return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) } var m []images.Image diff --git a/vendor/github.com/containerd/containerd/metadata/leases.go b/vendor/github.com/containerd/containerd/metadata/leases.go index 635c3b3c3..a3c1701d8 100644 --- a/vendor/github.com/containerd/containerd/metadata/leases.go +++ b/vendor/github.com/containerd/containerd/metadata/leases.go @@ -122,7 +122,7 @@ func (lm *LeaseManager) List(ctx context.Context, fs ...string) ([]leases.Lease, filter, err := filters.ParseAll(fs...) 
if err != nil { - return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error()) + return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) } var ll []leases.Lease diff --git a/vendor/github.com/containerd/containerd/metadata/snapshot.go b/vendor/github.com/containerd/containerd/metadata/snapshot.go index 75b80b0bc..b1665c019 100644 --- a/vendor/github.com/containerd/containerd/metadata/snapshot.go +++ b/vendor/github.com/containerd/containerd/metadata/snapshot.go @@ -628,7 +628,7 @@ func (s *snapshotter) garbageCollect(ctx context.Context) (d time.Duration, err } } if err == nil { - d = time.Now().Sub(t1) + d = time.Since(t1) } }() diff --git a/vendor/github.com/containerd/containerd/mount/mount_unix.go b/vendor/github.com/containerd/containerd/mount/mount_unix.go index 6741293f8..95da9428e 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_unix.go +++ b/vendor/github.com/containerd/containerd/mount/mount_unix.go @@ -1,4 +1,4 @@ -// +build darwin freebsd +// +build darwin freebsd openbsd /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo_freebsd.go b/vendor/github.com/containerd/containerd/mount/mountinfo_bsd.go similarity index 98% rename from vendor/github.com/containerd/containerd/mount/mountinfo_freebsd.go rename to vendor/github.com/containerd/containerd/mount/mountinfo_bsd.go index bbe79767e..8f8dbf95a 100644 --- a/vendor/github.com/containerd/containerd/mount/mountinfo_freebsd.go +++ b/vendor/github.com/containerd/containerd/mount/mountinfo_bsd.go @@ -1,3 +1,5 @@ +// +build freebsd openbsd + /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go b/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go index a986f8f4e..369d045d7 100644 --- a/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go +++ b/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go @@ -68,7 +68,7 @@ func parseInfoFile(r io.Reader) ([]Info, error) { numFields := len(fields) if numFields < 10 { // should be at least 10 fields - return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields) + return nil, fmt.Errorf("parsing '%s' failed: not enough fields (%d)", text, numFields) } p := Info{} // ignore any numbers parsing errors, as there should not be any @@ -76,7 +76,7 @@ func parseInfoFile(r io.Reader) ([]Info, error) { p.Parent, _ = strconv.Atoi(fields[1]) mm := strings.Split(fields[2], ":") if len(mm) != 2 { - return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm) + return nil, fmt.Errorf("parsing '%s' failed: unexpected minor:major pair %s", text, mm) } p.Major, _ = strconv.Atoi(mm[0]) p.Minor, _ = strconv.Atoi(mm[1]) @@ -101,11 +101,11 @@ func parseInfoFile(r io.Reader) ([]Info, error) { } } if i == numFields { - return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text) + return nil, fmt.Errorf("parsing '%s' failed: missing separator ('-')", text) } // There should be 3 fields after the separator... if i+4 > numFields { - return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text) + return nil, fmt.Errorf("parsing '%s' failed: not enough fields after a separator", text) } // ... 
but in Linux <= 3.9 mounting a cifs with spaces in a share name // (like "//serv/My Documents") _may_ end up having a space in the last field diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go b/vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go index eba602f1a..ae998db6b 100644 --- a/vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go +++ b/vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo +// +build !linux,!freebsd,!solaris,!openbsd freebsd,!cgo solaris,!cgo openbsd,!cgo /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/oci/spec.go b/vendor/github.com/containerd/containerd/oci/spec.go index b83f40ac6..3e7b5492a 100644 --- a/vendor/github.com/containerd/containerd/oci/spec.go +++ b/vendor/github.com/containerd/containerd/oci/spec.go @@ -209,6 +209,7 @@ func populateDefaultUnixSpec(ctx context.Context, s *Spec, id string) error { Linux: &specs.Linux{ MaskedPaths: []string{ "/proc/acpi", + "/proc/asound", "/proc/kcore", "/proc/keys", "/proc/latency_stats", @@ -219,7 +220,6 @@ func populateDefaultUnixSpec(ctx context.Context, s *Spec, id string) error { "/proc/scsi", }, ReadonlyPaths: []string{ - "/proc/asound", "/proc/bus", "/proc/fs", "/proc/irq", @@ -247,17 +247,8 @@ func populateDefaultWindowsSpec(ctx context.Context, s *Spec, id string) error { Root: &specs.Root{}, Process: &specs.Process{ Cwd: `C:\`, - ConsoleSize: &specs.Box{ - Width: 80, - Height: 20, - }, - }, - Windows: &specs.Windows{ - IgnoreFlushesDuringBoot: true, - Network: &specs.WindowsNetwork{ - AllowUnqualifiedDNSQuery: true, - }, }, + Windows: &specs.Windows{}, } return nil } diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go index ab61d9b26..b9949f837 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts.go @@ -141,8 +141,10 @@ func WithEnv(environmentVariables []string) SpecOpts { // replaced by env key or appended to the list func replaceOrAppendEnvValues(defaults, overrides []string) []string { cache := make(map[string]int, len(defaults)) + results := make([]string, 0, len(defaults)) for i, e := range defaults { parts := strings.SplitN(e, "=", 2) + results = append(results, e) cache[parts[0]] = i } @@ -150,7 +152,7 @@ func replaceOrAppendEnvValues(defaults, overrides []string) []string { // Values w/o = means they want this env to be removed/unset. if !strings.Contains(value, "=") { if i, exists := cache[value]; exists { - defaults[i] = "" // Used to indicate it should be removed + results[i] = "" // Used to indicate it should be removed } continue } @@ -158,21 +160,21 @@ func replaceOrAppendEnvValues(defaults, overrides []string) []string { // Just do a normal set/update parts := strings.SplitN(value, "=", 2) if i, exists := cache[parts[0]]; exists { - defaults[i] = value + results[i] = value } else { - defaults = append(defaults, value) + results = append(results, value) } } // Now remove all entries that we want to "unset" - for i := 0; i < len(defaults); i++ { - if defaults[i] == "" { - defaults = append(defaults[:i], defaults[i+1:]...) + for i := 0; i < len(results); i++ { + if results[i] == "" { + results = append(results[:i], results[i+1:]...) 
i-- } } - return defaults + return results } // WithProcessArgs replaces the args on the generated spec @@ -310,7 +312,7 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts { setProcess(s) if s.Linux != nil { - s.Process.Env = append(s.Process.Env, config.Env...) + s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, config.Env) cmd := config.Cmd if len(args) > 0 { cmd = args @@ -332,8 +334,14 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts { // even if there is no specified user in the image config return WithAdditionalGIDs("root")(ctx, client, c, s) } else if s.Windows != nil { - s.Process.Env = config.Env - s.Process.Args = append(config.Entrypoint, config.Cmd...) + s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, config.Env) + cmd := config.Cmd + if len(args) > 0 { + cmd = args + } + s.Process.Args = append(config.Entrypoint, cmd...) + + s.Process.Cwd = config.WorkingDir s.Process.User = specs.User{ Username: config.User, } @@ -654,6 +662,10 @@ func WithUsername(username string) SpecOpts { // The passed in user can be either a uid or a username. func WithAdditionalGIDs(userstr string) SpecOpts { return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { + // For LCOW additional GID's not supported + if s.Windows != nil { + return nil + } setProcess(s) setAdditionalGids := func(root string) error { var username string @@ -1022,3 +1034,32 @@ func WithWindowsHyperV(_ context.Context, _ Client, _ *containers.Container, s * } return nil } + +// WithMemoryLimit sets the `Linux.LinuxResources.Memory.Limit` section to the +// `limit` specified if the `Linux` section is not `nil`. Additionally sets the +// `Windows.WindowsResources.Memory.Limit` section if the `Windows` section is +// not `nil`. +func WithMemoryLimit(limit uint64) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if s.Linux != nil { + if s.Linux.Resources == nil { + s.Linux.Resources = &specs.LinuxResources{} + } + if s.Linux.Resources.Memory == nil { + s.Linux.Resources.Memory = &specs.LinuxMemory{} + } + l := int64(limit) + s.Linux.Resources.Memory.Limit = &l + } + if s.Windows != nil { + if s.Windows.Resources == nil { + s.Windows.Resources = &specs.WindowsResources{} + } + if s.Windows.Resources.Memory == nil { + s.Windows.Resources.Memory = &specs.WindowsMemoryResources{} + } + s.Windows.Resources.Memory.Limit = &limit + } + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go new file mode 100644 index 000000000..fbe1cb33c --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go @@ -0,0 +1,67 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package oci + +import ( + "context" + + "github.com/containerd/containerd/containers" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the +// `count` specified. +func WithWindowsCPUCount(count uint64) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if s.Windows.Resources == nil { + s.Windows.Resources = &specs.WindowsResources{} + } + if s.Windows.Resources.CPU == nil { + s.Windows.Resources.CPU = &specs.WindowsCPUResources{} + } + s.Windows.Resources.CPU.Count = &count + return nil + } +} + +// WithWindowsIgnoreFlushesDuringBoot sets `Windows.IgnoreFlushesDuringBoot`. +func WithWindowsIgnoreFlushesDuringBoot() SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if s.Windows == nil { + s.Windows = &specs.Windows{} + } + s.Windows.IgnoreFlushesDuringBoot = true + return nil + } +} + +// WithWindowNetworksAllowUnqualifiedDNSQuery sets `Windows.IgnoreFlushesDuringBoot`. +func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if s.Windows == nil { + s.Windows = &specs.Windows{} + } + if s.Windows.Network == nil { + s.Windows.Network = &specs.WindowsNetwork{} + } + + s.Windows.Network.AllowUnqualifiedDNSQuery = true + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go index a5c5ab42b..bf6476b64 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -74,6 +74,22 @@ func getCPUInfo(pattern string) (info string, err error) { } func getCPUVariant() string { + if runtime.GOOS == "windows" { + // Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + var variant string + switch runtime.GOARCH { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "unknown" + } + + return variant + } + variant, err := getCPUInfo("Cpu architecture") if err != nil { log.L.WithError(err).Error("failure getting variant") diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go index 0847ae7f2..4d2d486d0 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin.go @@ -42,10 +42,7 @@ var ( // IsSkipPlugin returns true if the error is skipping the plugin func IsSkipPlugin(err error) bool { - if errors.Cause(err) == ErrSkipPlugin { - return true - } - return false + return errors.Cause(err) == ErrSkipPlugin } // Type is the type of the plugin diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth.go b/vendor/github.com/containerd/containerd/remotes/docker/auth.go index 80bcb9dcf..70cfdea4f 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/auth.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/auth.go @@ -79,8 +79,8 @@ func init() { var t octetType isCtl := c <= 31 || c == 127 isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { t |= isSpace } if isChar 
&& !isCtl && !isSeparator { diff --git a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go new file mode 100644 index 000000000..73adb5a2f --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go @@ -0,0 +1,317 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/net/context/ctxhttp" +) + +type dockerAuthorizer struct { + credentials func(string) (string, string, error) + + client *http.Client + mu sync.Mutex + + auth map[string]string +} + +// NewAuthorizer creates a Docker authorizer using the provided function to +// get credentials for the token server or basic auth. +func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer { + if client == nil { + client = http.DefaultClient + } + return &dockerAuthorizer{ + credentials: f, + client: client, + auth: map[string]string{}, + } +} + +func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error { + // TODO: Lookup matching challenge and scope rather than just host + if auth := a.getAuth(req.URL.Host); auth != "" { + req.Header.Set("Authorization", auth) + } + + return nil +} + +func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error { + last := responses[len(responses)-1] + host := last.Request.URL.Host + for _, c := range parseAuthHeader(last.Header) { + if c.scheme == bearerAuth { + if err := invalidAuthorization(c, responses); err != nil { + // TODO: Clear token + a.setAuth(host, "") + return err + } + + // TODO(dmcg): Store challenge, not token + // Move token fetching to authorize + return a.setTokenAuth(ctx, host, c.parameters) + } else if c.scheme == basicAuth && a.credentials != nil { + // TODO: Resolve credentials on authorize + username, secret, err := a.credentials(host) + if err != nil { + return err + } + if username != "" && secret != "" { + auth := username + ":" + secret + a.setAuth(host, fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(auth)))) + return nil + } + } + } + + return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") +} + +func (a *dockerAuthorizer) getAuth(host string) string { + a.mu.Lock() + defer a.mu.Unlock() + + return a.auth[host] +} + +func (a *dockerAuthorizer) setAuth(host string, auth string) bool { + a.mu.Lock() + defer a.mu.Unlock() + + changed := a.auth[host] != auth + a.auth[host] = auth + + return changed +} + +func (a *dockerAuthorizer) setTokenAuth(ctx context.Context, host string, params map[string]string) error { + realm, ok := params["realm"] + if !ok { + return errors.New("no realm 
specified for token auth challenge") + } + + realmURL, err := url.Parse(realm) + if err != nil { + return errors.Wrap(err, "invalid token auth challenge realm") + } + + to := tokenOptions{ + realm: realmURL.String(), + service: params["service"], + } + + to.scopes = getTokenScopes(ctx, params) + if len(to.scopes) == 0 { + return errors.Errorf("no scope specified for token auth challenge") + } + + if a.credentials != nil { + to.username, to.secret, err = a.credentials(host) + if err != nil { + return err + } + } + + var token string + if to.secret != "" { + // Credential information is provided, use oauth POST endpoint + token, err = a.fetchTokenWithOAuth(ctx, to) + if err != nil { + return errors.Wrap(err, "failed to fetch oauth token") + } + } else { + // Do request anonymously + token, err = a.fetchToken(ctx, to) + if err != nil { + return errors.Wrap(err, "failed to fetch anonymous token") + } + } + a.setAuth(host, fmt.Sprintf("Bearer %s", token)) + + return nil +} + +type tokenOptions struct { + realm string + service string + scopes []string + username string + secret string +} + +type postTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` +} + +func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) { + form := url.Values{} + form.Set("scope", strings.Join(to.scopes, " ")) + form.Set("service", to.service) + // TODO: Allow setting client_id + form.Set("client_id", "containerd-client") + + if to.username == "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", to.secret) + } else { + form.Set("grant_type", "password") + form.Set("username", to.username) + form.Set("password", to.secret) + } + + resp, err := ctxhttp.Post( + ctx, a.client, to.realm, + "application/x-www-form-urlencoded; charset=utf-8", + strings.NewReader(form.Encode()), + ) + if err != nil { + return "", err + } + defer resp.Body.Close() + + // Registries without support for POST may return 404 for POST /v2/token. + // As of September 2017, GCR is known to return 404. + // As of February 2018, JFrog Artifactory is known to return 401. 
+ if (resp.StatusCode == 405 && to.username != "") || resp.StatusCode == 404 || resp.StatusCode == 401 { + return a.fetchToken(ctx, to) + } else if resp.StatusCode < 200 || resp.StatusCode >= 400 { + b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB + log.G(ctx).WithFields(logrus.Fields{ + "status": resp.Status, + "body": string(b), + }).Debugf("token request failed") + // TODO: handle error body and write debug output + return "", errors.Errorf("unexpected status: %s", resp.Status) + } + + decoder := json.NewDecoder(resp.Body) + + var tr postTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", fmt.Errorf("unable to decode token response: %s", err) + } + + return tr.AccessToken, nil +} + +type getTokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` +} + +// getToken fetches a token using a GET request +func (a *dockerAuthorizer) fetchToken(ctx context.Context, to tokenOptions) (string, error) { + req, err := http.NewRequest("GET", to.realm, nil) + if err != nil { + return "", err + } + + reqParams := req.URL.Query() + + if to.service != "" { + reqParams.Add("service", to.service) + } + + for _, scope := range to.scopes { + reqParams.Add("scope", scope) + } + + if to.secret != "" { + req.SetBasicAuth(to.username, to.secret) + } + + req.URL.RawQuery = reqParams.Encode() + + resp, err := ctxhttp.Do(ctx, a.client, req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + // TODO: handle error body and write debug output + return "", errors.Errorf("unexpected status: %s", resp.Status) + } + + decoder := json.NewDecoder(resp.Body) + + var tr getTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", fmt.Errorf("unable to decode token response: %s", err) + } + + // `access_token` is equivalent to `token` and if both are specified + // the choice is undefined. Canonicalize `access_token` by sticking + // things in `token`. + if tr.AccessToken != "" { + tr.Token = tr.AccessToken + } + + if tr.Token == "" { + return "", ErrNoToken + } + + return tr.Token, nil +} + +func invalidAuthorization(c challenge, responses []*http.Response) error { + errStr := c.parameters["error"] + if errStr == "" { + return nil + } + + n := len(responses) + if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) { + return nil + } + + return errors.Wrapf(ErrInvalidAuthorization, "server message: %s", errStr) +} + +func sameRequest(r1, r2 *http.Request) bool { + if r1.Method != r2.Method { + return false + } + if *r1.URL != *r2.URL { + return false + } + return true +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/converter.go new file mode 100644 index 000000000..43e6b372c --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/converter.go @@ -0,0 +1,88 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/remotes" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// LegacyConfigMediaType should be replaced by OCI image spec. +// +// More detail: docker/distribution#1622 +const LegacyConfigMediaType = "application/octet-stream" + +// ConvertManifest changes application/octet-stream to schema2 config media type if need. +// +// NOTE: +// 1. original manifest will be deleted by next gc round. +// 2. don't cover manifest list. +func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) { + if !(desc.MediaType == images.MediaTypeDockerSchema2Manifest || + desc.MediaType == ocispec.MediaTypeImageManifest) { + + log.G(ctx).Warnf("do nothing for media type: %s", desc.MediaType) + return desc, nil + } + + // read manifest data + mb, err := content.ReadBlob(ctx, store, desc) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to read index data") + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(mb, &manifest); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal data into manifest") + } + + // check config media type + if manifest.Config.MediaType != LegacyConfigMediaType { + return desc, nil + } + + manifest.Config.MediaType = images.MediaTypeDockerSchema2Config + data, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal manifest") + } + + // update manifest with gc labels + desc.Digest = digest.Canonical.FromBytes(data) + desc.Size = int64(len(data)) + + labels := map[string]string{} + for i, c := range append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...) 
{ + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = c.Digest.String() + } + + ref := remotes.MakeRefKey(ctx, desc) + if err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to update content") + } + return desc, nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go index f0a677c83..427052b77 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -18,22 +18,18 @@ package docker import ( "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" "net/http" "net/url" "path" "strconv" "strings" - "sync" - "time" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/version" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -51,24 +47,45 @@ var ( ErrInvalidAuthorization = errors.New("authorization failed") ) -type dockerResolver struct { - credentials func(string) (string, string, error) - host func(string) (string, error) - plainHTTP bool - client *http.Client - tracker StatusTracker +// Authorizer is used to authorize HTTP requests based on 401 HTTP responses. +// An Authorizer is responsible for caching tokens or credentials used by +// requests. +type Authorizer interface { + // Authorize sets the appropriate `Authorization` header on the given + // request. + // + // If no authorization is found for the request, the request remains + // unmodified. It may also add an `Authorization` header as + // "bearer " + // "basic " + Authorize(context.Context, *http.Request) error + + // AddResponses adds a 401 response for the authorizer to consider when + // authorizing requests. The last response should be unauthorized and + // the previous requests are used to consider redirects and retries + // that may have led to the 401. + // + // If response is not handled, returns `ErrNotImplemented` + AddResponses(context.Context, []*http.Response) error } // ResolverOptions are used to configured a new Docker register resolver type ResolverOptions struct { + // Authorizer is used to authorize registry requests + Authorizer Authorizer + // Credentials provides username and secret given a host. // If username is empty but a secret is given, that secret - // is interpretted as a long lived token. + // is interpreted as a long lived token. + // Deprecated: use Authorizer Credentials func(string) (string, string, error) // Host provides the hostname given a namespace. 
Host func(string) (string, error) + // Headers are the HTTP request header fields sent by the resolver + Headers http.Header + // PlainHTTP specifies to use plain http and not https PlainHTTP bool @@ -89,22 +106,47 @@ func DefaultHost(ns string) (string, error) { return ns, nil } +type dockerResolver struct { + auth Authorizer + host func(string) (string, error) + headers http.Header + plainHTTP bool + client *http.Client + tracker StatusTracker +} + // NewResolver returns a new resolver to a Docker registry func NewResolver(options ResolverOptions) remotes.Resolver { - tracker := options.Tracker - if tracker == nil { - tracker = NewInMemoryTracker() + if options.Tracker == nil { + options.Tracker = NewInMemoryTracker() + } + if options.Host == nil { + options.Host = DefaultHost + } + if options.Headers == nil { + options.Headers = make(http.Header) + } + if _, ok := options.Headers["Accept"]; !ok { + // set headers for all the types we support for resolution. + options.Headers.Set("Accept", strings.Join([]string{ + images.MediaTypeDockerSchema2Manifest, + images.MediaTypeDockerSchema2ManifestList, + ocispec.MediaTypeImageManifest, + ocispec.MediaTypeImageIndex, "*"}, ", ")) } - host := options.Host - if host == nil { - host = DefaultHost + if _, ok := options.Headers["User-Agent"]; !ok { + options.Headers.Set("User-Agent", "containerd/"+version.Version) + } + if options.Authorizer == nil { + options.Authorizer = NewAuthorizer(options.Client, options.Credentials) } return &dockerResolver{ - credentials: options.Credentials, - host: host, - plainHTTP: options.PlainHTTP, - client: options.Client, - tracker: tracker, + auth: options.Authorizer, + host: options.Host, + headers: options.Headers, + plainHTTP: options.PlainHTTP, + client: options.Client, + tracker: options.Tracker, } } @@ -160,12 +202,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp return "", ocispec.Descriptor{}, err } - // set headers for all the types we support for resolution. 
- req.Header.Set("Accept", strings.Join([]string{ - images.MediaTypeDockerSchema2Manifest, - images.MediaTypeDockerSchema2ManifestList, - ocispec.MediaTypeImageManifest, - ocispec.MediaTypeImageIndex, "*"}, ", ")) + req.Header = r.headers log.G(ctx).Debug("resolving") resp, err := fetcher.doRequestWithRetries(ctx, req, nil) @@ -272,18 +309,14 @@ type dockerBase struct { refspec reference.Spec base url.URL - client *http.Client - useBasic bool - username, secret string - token string - mu sync.Mutex + client *http.Client + auth Authorizer } func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) { var ( - err error - base url.URL - username, secret string + err error + base url.URL ) host := refspec.Hostname() @@ -300,61 +333,40 @@ func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) { base.Scheme = "http" } - if r.credentials != nil { - username, secret, err = r.credentials(base.Host) - if err != nil { - return nil, err - } - } - prefix := strings.TrimPrefix(refspec.Locator, host+"/") base.Path = path.Join("/v2", prefix) return &dockerBase{ - refspec: refspec, - base: base, - client: r.client, - username: username, - secret: secret, + refspec: refspec, + base: base, + client: r.client, + auth: r.auth, }, nil } -func (r *dockerBase) getToken() string { - r.mu.Lock() - defer r.mu.Unlock() - - return r.token -} - -func (r *dockerBase) setToken(token string) bool { - r.mu.Lock() - defer r.mu.Unlock() - - changed := r.token != token - r.token = token - - return changed -} - func (r *dockerBase) url(ps ...string) string { url := r.base url.Path = path.Join(url.Path, path.Join(ps...)) return url.String() } -func (r *dockerBase) authorize(req *http.Request) { - token := r.getToken() - if r.useBasic { - req.SetBasicAuth(r.username, r.secret) - } else if token != "" { - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) +func (r *dockerBase) authorize(ctx context.Context, req *http.Request) error { + // Check if has header for host + if r.auth != nil { + if err := r.auth.Authorize(ctx, req); err != nil { + return err + } } + + return nil } func (r *dockerBase) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) { ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String())) log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("do request") - r.authorize(req) + if err := r.authorize(ctx, req); err != nil { + return nil, errors.Wrap(err, "failed to authorize") + } resp, err := ctxhttp.Do(ctx, r.client, req) if err != nil { return nil, errors.Wrap(err, "failed to do request") @@ -392,23 +404,14 @@ func (r *dockerBase) retryRequest(ctx context.Context, req *http.Request, respon last := responses[len(responses)-1] if last.StatusCode == http.StatusUnauthorized { log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized") - for _, c := range parseAuthHeader(last.Header) { - if c.scheme == bearerAuth { - if err := invalidAuthorization(c, responses); err != nil { - r.setToken("") - return nil, err - } - if err := r.setTokenAuth(ctx, c.parameters); err != nil { - return nil, err - } - return copyRequest(req) - } else if c.scheme == basicAuth { - if r.username != "" && r.secret != "" { - r.useBasic = true - } + if r.auth != nil { + if err := r.auth.AddResponses(ctx, responses); err == nil { return copyRequest(req) + } else if !errdefs.IsNotImplemented(err) { + return nil, err } } + return nil, nil } else if last.StatusCode == 
http.StatusMethodNotAllowed && req.Method == http.MethodHead { // Support registries which have not properly implemented the HEAD method for @@ -424,30 +427,6 @@ func (r *dockerBase) retryRequest(ctx context.Context, req *http.Request, respon return nil, nil } -func invalidAuthorization(c challenge, responses []*http.Response) error { - errStr := c.parameters["error"] - if errStr == "" { - return nil - } - - n := len(responses) - if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) { - return nil - } - - return errors.Wrapf(ErrInvalidAuthorization, "server message: %s", errStr) -} - -func sameRequest(r1, r2 *http.Request) bool { - if r1.Method != r2.Method { - return false - } - if *r1.URL != *r2.URL { - return false - } - return true -} - func copyRequest(req *http.Request) (*http.Request, error) { ireq := *req if ireq.GetBody != nil { @@ -459,167 +438,3 @@ func copyRequest(req *http.Request) (*http.Request, error) { } return &ireq, nil } - -func (r *dockerBase) setTokenAuth(ctx context.Context, params map[string]string) error { - realm, ok := params["realm"] - if !ok { - return errors.New("no realm specified for token auth challenge") - } - - realmURL, err := url.Parse(realm) - if err != nil { - return fmt.Errorf("invalid token auth challenge realm: %s", err) - } - - to := tokenOptions{ - realm: realmURL.String(), - service: params["service"], - } - - to.scopes = getTokenScopes(ctx, params) - if len(to.scopes) == 0 { - return errors.Errorf("no scope specified for token auth challenge") - } - - var token string - if r.secret != "" { - // Credential information is provided, use oauth POST endpoint - token, err = r.fetchTokenWithOAuth(ctx, to) - if err != nil { - return errors.Wrap(err, "failed to fetch oauth token") - } - } else { - // Do request anonymously - token, err = r.fetchToken(ctx, to) - if err != nil { - return errors.Wrap(err, "failed to fetch anonymous token") - } - } - r.setToken(token) - - return nil -} - -type tokenOptions struct { - realm string - service string - scopes []string -} - -type postTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` -} - -func (r *dockerBase) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) { - form := url.Values{} - form.Set("scope", strings.Join(to.scopes, " ")) - form.Set("service", to.service) - // TODO: Allow setting client_id - form.Set("client_id", "containerd-dist-tool") - - if r.username == "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", r.secret) - } else { - form.Set("grant_type", "password") - form.Set("username", r.username) - form.Set("password", r.secret) - } - - resp, err := ctxhttp.PostForm(ctx, r.client, to.realm, form) - if err != nil { - return "", err - } - defer resp.Body.Close() - - // Registries without support for POST may return 404 for POST /v2/token. - // As of September 2017, GCR is known to return 404. - // As of February 2018, JFrog Artifactory is known to return 401. 
- if (resp.StatusCode == 405 && r.username != "") || resp.StatusCode == 404 || resp.StatusCode == 401 { - return r.fetchToken(ctx, to) - } else if resp.StatusCode < 200 || resp.StatusCode >= 400 { - b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB - log.G(ctx).WithFields(logrus.Fields{ - "status": resp.Status, - "body": string(b), - }).Debugf("token request failed") - // TODO: handle error body and write debug output - return "", errors.Errorf("unexpected status: %s", resp.Status) - } - - decoder := json.NewDecoder(resp.Body) - - var tr postTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", fmt.Errorf("unable to decode token response: %s", err) - } - - return tr.AccessToken, nil -} - -type getTokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - RefreshToken string `json:"refresh_token"` -} - -// getToken fetches a token using a GET request -func (r *dockerBase) fetchToken(ctx context.Context, to tokenOptions) (string, error) { - req, err := http.NewRequest("GET", to.realm, nil) - if err != nil { - return "", err - } - - reqParams := req.URL.Query() - - if to.service != "" { - reqParams.Add("service", to.service) - } - - for _, scope := range to.scopes { - reqParams.Add("scope", scope) - } - - if r.secret != "" { - req.SetBasicAuth(r.username, r.secret) - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := ctxhttp.Do(ctx, r.client, req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - // TODO: handle error body and write debug output - return "", errors.Errorf("unexpected status: %s", resp.Status) - } - - decoder := json.NewDecoder(resp.Body) - - var tr getTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", fmt.Errorf("unable to decode token response: %s", err) - } - - // `access_token` is equivalent to `token` and if both are specified - // the choice is undefined. Canonicalize `access_token` by sticking - // things in `token`. 
- if tr.AccessToken != "" { - tr.Token = tr.AccessToken - } - - if tr.Token == "" { - return "", ErrNoToken - } - - return tr.Token, nil -} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go index 45ac1933f..766c24a26 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go @@ -24,6 +24,7 @@ import ( "fmt" "io" "io/ioutil" + "strconv" "strings" "sync" "time" @@ -42,7 +43,10 @@ import ( "github.com/pkg/errors" ) -const manifestSizeLimit = 8e6 // 8MB +const ( + manifestSizeLimit = 8e6 // 8MB + labelDockerSchema1EmptyLayer = "containerd.io/docker.schema1.empty-layer" +) type blobState struct { diffID digest.Digest @@ -353,10 +357,11 @@ func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) erro Digest: desc.Digest, Labels: map[string]string{ "containerd.io/uncompressed": state.diffID.String(), + labelDockerSchema1EmptyLayer: strconv.FormatBool(state.empty), }, } - if _, err := c.contentStore.Update(ctx, cinfo, "labels.containerd.io/uncompressed"); err != nil { + if _, err := c.contentStore.Update(ctx, cinfo, "labels.containerd.io/uncompressed", fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil { return errors.Wrap(err, "failed to update uncompressed label") } @@ -380,7 +385,18 @@ func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descri return false, nil } - bState := blobState{empty: false} + emptyVal, ok := cinfo.Labels[labelDockerSchema1EmptyLayer] + if !ok { + return false, nil + } + + isEmpty, err := strconv.ParseBool(emptyVal) + if err != nil { + log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse bool from label %s: %v", labelDockerSchema1EmptyLayer, isEmpty) + return false, nil + } + + bState := blobState{empty: isEmpty} if bState.diffID, err = digest.Parse(diffID); err != nil { log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label containerd.io/uncompressed: %v", diffID) diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go index 77310fb62..86922de99 100644 --- a/vendor/github.com/containerd/containerd/remotes/handlers.go +++ b/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -181,7 +181,7 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, pr pushHandler, ) - if err := images.Dispatch(ctx, images.Handlers(handlers...), desc); err != nil { + if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil { return err } diff --git a/vendor/github.com/containerd/containerd/services/content/service.go b/vendor/github.com/containerd/containerd/services/content/service.go new file mode 100644 index 000000000..d7e666053 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/content/service.go @@ -0,0 +1,492 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package content + +import ( + "context" + "io" + "sync" + + api "github.com/containerd/containerd/api/services/content/v1" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + ptypes "github.com/gogo/protobuf/types" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type service struct { + store content.Store +} + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} + +var _ api.ContentServer = &service{} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.GRPCPlugin, + ID: "content", + Requires: []plugin.Type{ + plugin.ServicePlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + plugins, err := ic.GetByType(plugin.ServicePlugin) + if err != nil { + return nil, err + } + p, ok := plugins[services.ContentService] + if !ok { + return nil, errors.New("content store service not found") + } + cs, err := p.Instance() + if err != nil { + return nil, err + } + return NewService(cs.(content.Store)), nil + }, + }) +} + +// NewService returns the content GRPC server +func NewService(cs content.Store) api.ContentServer { + return &service{store: cs} +} + +func (s *service) Register(server *grpc.Server) error { + api.RegisterContentServer(server, s) + return nil +} + +func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) { + if err := req.Digest.Validate(); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest) + } + + bi, err := s.store.Info(ctx, req.Digest) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &api.InfoResponse{ + Info: infoToGRPC(bi), + }, nil +} + +func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) { + if err := req.Info.Digest.Validate(); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest) + } + + info, err := s.store.Update(ctx, infoFromGRPC(req.Info), req.UpdateMask.GetPaths()...) 
+ if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &api.UpdateResponse{ + Info: infoToGRPC(info), + }, nil +} + +func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error { + var ( + buffer []api.Info + sendBlock = func(block []api.Info) error { + // send last block + return session.Send(&api.ListContentResponse{ + Info: block, + }) + } + ) + + if err := s.store.Walk(session.Context(), func(info content.Info) error { + buffer = append(buffer, api.Info{ + Digest: info.Digest, + Size_: info.Size, + CreatedAt: info.CreatedAt, + Labels: info.Labels, + }) + + if len(buffer) >= 100 { + if err := sendBlock(buffer); err != nil { + return err + } + + buffer = buffer[:0] + } + + return nil + }, req.Filters...); err != nil { + return err + } + + if len(buffer) > 0 { + // send last block + if err := sendBlock(buffer); err != nil { + return err + } + } + + return nil +} + +func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*ptypes.Empty, error) { + log.G(ctx).WithField("digest", req.Digest).Debugf("delete content") + if err := req.Digest.Validate(); err != nil { + return nil, status.Errorf(codes.InvalidArgument, err.Error()) + } + + if err := s.store.Delete(ctx, req.Digest); err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &ptypes.Empty{}, nil +} + +func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error { + if err := req.Digest.Validate(); err != nil { + return status.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err) + } + + oi, err := s.store.Info(session.Context(), req.Digest) + if err != nil { + return errdefs.ToGRPC(err) + } + + ra, err := s.store.ReaderAt(session.Context(), ocispec.Descriptor{Digest: req.Digest}) + if err != nil { + return errdefs.ToGRPC(err) + } + defer ra.Close() + + var ( + offset = req.Offset + // size is read size, not the expected size of the blob (oi.Size), which the caller might not be aware of. + // offset+size can be larger than oi.Size. + size = req.Size_ + + // TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably + // little inefficient for work over a fast network. We can tune this later. + p = bufPool.Get().(*[]byte) + ) + defer bufPool.Put(p) + + if offset < 0 { + offset = 0 + } + + if offset > oi.Size { + return status.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size) + } + + if size <= 0 || offset+size > oi.Size { + size = oi.Size - offset + } + + _, err = io.CopyBuffer( + &readResponseWriter{session: session}, + io.NewSectionReader(ra, offset, size), *p) + return errdefs.ToGRPC(err) +} + +// readResponseWriter is a writer that places the output into ReadContentRequest messages. +// +// This allows io.CopyBuffer to do the heavy lifting of chunking the responses +// into the buffer size. 
+type readResponseWriter struct { + offset int64 + session api.Content_ReadServer +} + +func (rw *readResponseWriter) Write(p []byte) (n int, err error) { + if err := rw.session.Send(&api.ReadContentResponse{ + Offset: rw.offset, + Data: p, + }); err != nil { + return 0, err + } + + rw.offset += int64(len(p)) + return len(p), nil +} + +func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) { + status, err := s.store.Status(ctx, req.Ref) + if err != nil { + return nil, errdefs.ToGRPCf(err, "could not get status for ref %q", req.Ref) + } + + var resp api.StatusResponse + resp.Status = &api.Status{ + StartedAt: status.StartedAt, + UpdatedAt: status.UpdatedAt, + Ref: status.Ref, + Offset: status.Offset, + Total: status.Total, + Expected: status.Expected, + } + + return &resp, nil +} + +func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest) (*api.ListStatusesResponse, error) { + statuses, err := s.store.ListStatuses(ctx, req.Filters...) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + var resp api.ListStatusesResponse + for _, status := range statuses { + resp.Statuses = append(resp.Statuses, api.Status{ + StartedAt: status.StartedAt, + UpdatedAt: status.UpdatedAt, + Ref: status.Ref, + Offset: status.Offset, + Total: status.Total, + Expected: status.Expected, + }) + } + + return &resp, nil +} + +func (s *service) Write(session api.Content_WriteServer) (err error) { + var ( + ctx = session.Context() + msg api.WriteContentResponse + req *api.WriteContentRequest + ref string + total int64 + expected digest.Digest + ) + + defer func(msg *api.WriteContentResponse) { + // pump through the last message if no error was encountered + if err != nil { + if s, ok := status.FromError(err); ok && s.Code() != codes.AlreadyExists { + // TODO(stevvooe): Really need a log line here to track which + // errors are actually causing failure on the server side. May want + // to configure the service with an interceptor to make this work + // identically across all GRPC methods. + // + // This is pretty noisy, so we can remove it but leave it for now. + log.G(ctx).WithError(err).Error("(*service).Write failed") + } + + return + } + + err = session.Send(msg) + }(&msg) + + // handle the very first request! + req, err = session.Recv() + if err != nil { + return err + } + + ref = req.Ref + + if ref == "" { + return status.Errorf(codes.InvalidArgument, "first message must have a reference") + } + + fields := logrus.Fields{ + "ref": ref, + } + total = req.Total + expected = req.Expected + if total > 0 { + fields["total"] = total + } + + if expected != "" { + fields["expected"] = expected + } + + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields)) + + log.G(ctx).Debug("(*service).Write started") + // this action locks the writer for the session. + wr, err := s.store.Writer(ctx, + content.WithRef(ref), + content.WithDescriptor(ocispec.Descriptor{Size: total, Digest: expected})) + if err != nil { + return errdefs.ToGRPC(err) + } + defer wr.Close() + + for { + msg.Action = req.Action + ws, err := wr.Status() + if err != nil { + return errdefs.ToGRPC(err) + } + + msg.Offset = ws.Offset // always set the offset. + + // NOTE(stevvooe): In general, there are two cases underwhich a remote + // writer is used. + // + // For pull, we almost always have this before fetching large content, + // through descriptors. We allow predeclaration of the expected size + // and digest. + // + // For push, it is more complex. 
If we want to cut through content into + // storage, we may have no expectation until we are done processing the + // content. The case here is the following: + // + // 1. Start writing content. + // 2. Compress inline. + // 3. Validate digest and size (maybe). + // + // Supporting these two paths is quite awkward but it lets both API + // users use the same writer style for each with a minimum of overhead. + if req.Expected != "" { + if expected != "" && expected != req.Expected { + log.G(ctx).Debugf("commit digest differs from writer digest: %v != %v", req.Expected, expected) + } + expected = req.Expected + + if _, err := s.store.Info(session.Context(), req.Expected); err == nil { + if err := wr.Close(); err != nil { + log.G(ctx).WithError(err).Error("failed to close writer") + } + if err := s.store.Abort(session.Context(), ref); err != nil { + log.G(ctx).WithError(err).Error("failed to abort write") + } + + return status.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected) + } + } + + if req.Total > 0 { + // Update the expected total. Typically, this could be seen at + // negotiation time or on a commit message. + if total > 0 && req.Total != total { + log.G(ctx).Debugf("commit size differs from writer size: %v != %v", req.Total, total) + } + total = req.Total + } + + switch req.Action { + case api.WriteActionStat: + msg.Digest = wr.Digest() + msg.StartedAt = ws.StartedAt + msg.UpdatedAt = ws.UpdatedAt + msg.Total = total + case api.WriteActionWrite, api.WriteActionCommit: + if req.Offset > 0 { + // validate the offset if provided + if req.Offset != ws.Offset { + return status.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset) + } + } + + if req.Offset == 0 && ws.Offset > 0 { + if err := wr.Truncate(req.Offset); err != nil { + return errors.Wrapf(err, "truncate failed") + } + msg.Offset = req.Offset + } + + // issue the write if we actually have data. + if len(req.Data) > 0 { + // While this looks like we could use io.WriterAt here, because we + // maintain the offset as append only, we just issue the write. + n, err := wr.Write(req.Data) + if err != nil { + return errdefs.ToGRPC(err) + } + + if n != len(req.Data) { + // TODO(stevvooe): Perhaps, we can recover this by including it + // in the offset on the write return. 
+ return status.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data)) + } + + msg.Offset += int64(n) + } + + if req.Action == api.WriteActionCommit { + var opts []content.Opt + if req.Labels != nil { + opts = append(opts, content.WithLabels(req.Labels)) + } + if err := wr.Commit(ctx, total, expected, opts...); err != nil { + return errdefs.ToGRPC(err) + } + } + + msg.Digest = wr.Digest() + } + + if err := session.Send(&msg); err != nil { + return err + } + + req, err = session.Recv() + if err != nil { + if err == io.EOF { + return nil + } + + return err + } + } +} + +func (s *service) Abort(ctx context.Context, req *api.AbortRequest) (*ptypes.Empty, error) { + if err := s.store.Abort(ctx, req.Ref); err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &ptypes.Empty{}, nil +} + +func infoToGRPC(info content.Info) api.Info { + return api.Info{ + Digest: info.Digest, + Size_: info.Size, + CreatedAt: info.CreatedAt, + UpdatedAt: info.UpdatedAt, + Labels: info.Labels, + } +} + +func infoFromGRPC(info api.Info) content.Info { + return content.Info{ + Digest: info.Digest, + Size: info.Size_, + CreatedAt: info.CreatedAt, + UpdatedAt: info.UpdatedAt, + Labels: info.Labels, + } +} diff --git a/vendor/github.com/containerd/containerd/services/content/store.go b/vendor/github.com/containerd/containerd/services/content/store.go new file mode 100644 index 000000000..3de91d37c --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/content/store.go @@ -0,0 +1,71 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package content + +import ( + "context" + + eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/services" + digest "github.com/opencontainers/go-digest" +) + +// store wraps content.Store with proper event published. +type store struct { + content.Store + publisher events.Publisher +} + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.ServicePlugin, + ID: services.ContentService, + Requires: []plugin.Type{ + plugin.MetadataPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + m, err := ic.Get(plugin.MetadataPlugin) + if err != nil { + return nil, err + } + + s, err := newContentStore(m.(*metadata.DB).ContentStore(), ic.Events) + return s, err + }, + }) +} + +func newContentStore(cs content.Store, publisher events.Publisher) (content.Store, error) { + return &store{ + Store: cs, + publisher: publisher, + }, nil +} + +func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { + if err := s.Store.Delete(ctx, dgst); err != nil { + return err + } + // TODO: Consider whether we should return error here. 
+ return s.publisher.Publish(ctx, "/content/delete", &eventstypes.ContentDelete{ + Digest: dgst, + }) +} diff --git a/vendor/github.com/containerd/containerd/services/services.go b/vendor/github.com/containerd/containerd/services/services.go new file mode 100644 index 000000000..efc920093 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/services.go @@ -0,0 +1,36 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package services + +const ( + // ContentService is id of content service. + ContentService = "content-service" + // SnapshotsService is id of snapshots service. + SnapshotsService = "snapshots-service" + // ImagesService is id of images service. + ImagesService = "images-service" + // ContainersService is id of containers service. + ContainersService = "containers-service" + // TasksService is id of tasks service. + TasksService = "tasks-service" + // NamespacesService is id of namespaces service. + NamespacesService = "namespaces-service" + // LeasesService is id of leases service. + LeasesService = "leases-service" + // DiffService is id of diff service. + DiffService = "diff-service" +) diff --git a/vendor/github.com/containerd/containerd/snapshots/native/native.go b/vendor/github.com/containerd/containerd/snapshots/native/native.go index 0b9c3befc..5532ea66d 100644 --- a/vendor/github.com/containerd/containerd/snapshots/native/native.go +++ b/vendor/github.com/containerd/containerd/snapshots/native/native.go @@ -241,9 +241,8 @@ func (o *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapsho return storage.WalkInfo(ctx, fn) } -func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) ([]mount.Mount, error) { +func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ []mount.Mount, err error) { var ( - err error path, td string ) diff --git a/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go b/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go index 2c296adbe..9ad82c7f3 100644 --- a/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go +++ b/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go @@ -350,7 +350,7 @@ func (o *snapshotter) getCleanupDirectories(ctx context.Context, t storage.Trans return cleanup, nil } -func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) ([]mount.Mount, error) { +func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ []mount.Mount, err error) { ctx, t, err := o.ms.TransactionContext(ctx, true) if err != nil { return nil, err @@ -426,7 +426,7 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k } func (o *snapshotter) prepareDirectory(ctx context.Context, snapshotDir string, kind snapshots.Kind) (string, error) { - td, err := 
ioutil.TempDir(filepath.Join(o.root, "snapshots"), "new-") + td, err := ioutil.TempDir(snapshotDir, "new-") if err != nil { return "", errors.Wrap(err, "failed to create temp dir") } diff --git a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go index d11252d1e..68a807582 100644 --- a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go +++ b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go @@ -160,9 +160,13 @@ func (u *Usage) Add(other Usage) { // layerPath, tmpDir := getLayerPath(), mkTmpDir() // just a path to layer tar file. // // We start by using a Snapshotter to Prepare a new snapshot transaction, using a -// key and descending from the empty parent "": +// key and descending from the empty parent "". To prevent our layer from being +// garbage collected during unpacking, we add the `containerd.io/gc.root` label: // -// mounts, err := snapshotter.Prepare(ctx, key, "") +// noGcOpt := snapshotter.WithLabels(map[string]string{ +// "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339), +// }) +// mounts, err := snapshotter.Prepare(ctx, key, "", noGcOpt) // if err != nil { ... } // // We get back a list of mounts from Snapshotter.Prepare, with the key identifying @@ -191,15 +195,13 @@ func (u *Usage) Add(other Usage) { // // Now that we've verified and unpacked our layer, we commit the active // snapshot to a name. For this example, we are just going to use the layer -// digest, but in practice, this will probably be the ChainID: +// digest, but in practice, this will probably be the ChainID. This also removes +// the active snapshot: // -// if err := snapshotter.Commit(ctx, digest.String(), key); err != nil { ... } +// if err := snapshotter.Commit(ctx, digest.String(), key, noGcOpt); err != nil { ... } // // Now, we have a layer in the Snapshotter that can be accessed with the digest -// provided during commit. Once you have committed the snapshot, the active -// snapshot can be removed with the following: -// -// snapshotter.Remove(ctx, key) +// provided during commit. // // Importing the Next Layer // @@ -207,7 +209,7 @@ func (u *Usage) Add(other Usage) { // above except that the parent is provided as parent when calling // Manager.Prepare, assuming a clean, unique key identifier: // -// mounts, err := snapshotter.Prepare(ctx, key, parentDigest) +// mounts, err := snapshotter.Prepare(ctx, key, parentDigest, noGcOpt) // // We then mount, apply and commit, as we did above. The new snapshot will be // based on the content of the previous one. diff --git a/vendor/github.com/containerd/containerd/archive/time_darwin.go b/vendor/github.com/containerd/containerd/version/version.go similarity index 63% rename from vendor/github.com/containerd/containerd/archive/time_darwin.go rename to vendor/github.com/containerd/containerd/version/version.go index 9c2b656b0..b2874bf62 100644 --- a/vendor/github.com/containerd/containerd/archive/time_darwin.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -14,17 +14,16 @@ limitations under the License. */ -package archive +package version -import ( - "time" +var ( + // Package is filled at linking time + Package = "github.com/containerd/containerd" - "github.com/pkg/errors" -) + // Version holds the complete version number. Filled in at linking time. + Version = "1.2.0+unknown" -// as at MacOS 10.12 there is apparently no way to set timestamps -// with nanosecond precision. 
We could fall back to utimes/lutimes -// and lose the precision as a temporary workaround. -func chtimes(path string, atime, mtime time.Time) error { - return errors.New("OSX missing UtimesNanoAt") -} + // Revision is filled with the VCS (e.g. git) revision being used to build + // the program at linking time. + Revision = "" +) diff --git a/vendor/github.com/containerd/continuity/.travis.yml b/vendor/github.com/containerd/continuity/.travis.yml index 05d6f62bb..c601f807e 100644 --- a/vendor/github.com/containerd/continuity/.travis.yml +++ b/vendor/github.com/containerd/continuity/.travis.yml @@ -15,8 +15,19 @@ env: - TRAVIS_GOOS=linux - TRAVIS_GOOS=darwin +install: + - go get -u github.com/vbatts/git-validation + - go get -u github.com/kunalkushwaha/ltag + - go get -u github.com/LK4D4/vndr + +before_script: + - pushd ..; git clone https://github.com/containerd/project; popd + script: - export GOOS=${TRAVIS_GOOS} + - DCO_VERBOSITY=-q ../project/script/validate/dco + - ../project/script/validate/fileheader ../project/ + - ../project/script/validate/vendor - make build binaries - if [ "$GOOS" = "linux" ]; then make vet test; fi - if [ "$GOOS" != "linux" ]; then make test-compile; fi diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/NOTICE new file mode 100644 index 000000000..23a0ada2f --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/activation/files.go b/vendor/github.com/coreos/go-systemd/activation/files.go new file mode 100644 index 000000000..29dd18def --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/activation/files.go @@ -0,0 +1,67 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package activation implements primitives for systemd socket activation. +package activation + +import ( + "os" + "strconv" + "strings" + "syscall" +) + +const ( + // listenFdsStart corresponds to `SD_LISTEN_FDS_START`. + listenFdsStart = 3 +) + +// Files returns a slice containing a `os.File` object for each +// file descriptor passed to this process via systemd fd-passing protocol. +// +// The order of the file descriptors is preserved in the returned slice. +// `unsetEnv` is typically set to `true` in order to avoid clashes in +// fd usage and to avoid leaking environment flags to child processes. +func Files(unsetEnv bool) []*os.File { + if unsetEnv { + defer os.Unsetenv("LISTEN_PID") + defer os.Unsetenv("LISTEN_FDS") + defer os.Unsetenv("LISTEN_FDNAMES") + } + + pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) + if err != nil || pid != os.Getpid() { + return nil + } + + nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) + if err != nil || nfds == 0 { + return nil + } + + names := strings.Split(os.Getenv("LISTEN_FDNAMES"), ":") + + files := make([]*os.File, 0, nfds) + for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { + syscall.CloseOnExec(fd) + name := "LISTEN_FD_" + strconv.Itoa(fd) + offset := fd - listenFdsStart + if offset < len(names) && len(names[offset]) > 0 { + name = names[offset] + } + files = append(files, os.NewFile(uintptr(fd), name)) + } + + return files +} diff --git a/vendor/github.com/coreos/go-systemd/activation/listeners.go b/vendor/github.com/coreos/go-systemd/activation/listeners.go new file mode 100644 index 000000000..bb5cc2311 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/activation/listeners.go @@ -0,0 +1,103 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import ( + "crypto/tls" + "net" +) + +// Listeners returns a slice containing a net.Listener for each matching socket type +// passed to this process. +// +// The order of the file descriptors is preserved in the returned slice. +// Nil values are used to fill any gaps. For example if systemd were to return file descriptors +// corresponding with "udp, tcp, tcp", then the slice would contain {nil, net.Listener, net.Listener} +func Listeners() ([]net.Listener, error) { + files := Files(true) + listeners := make([]net.Listener, len(files)) + + for i, f := range files { + if pc, err := net.FileListener(f); err == nil { + listeners[i] = pc + f.Close() + } + } + return listeners, nil +} + +// ListenersWithNames maps a listener name to a set of net.Listener instances. 
+func ListenersWithNames() (map[string][]net.Listener, error) { + files := Files(true) + listeners := map[string][]net.Listener{} + + for _, f := range files { + if pc, err := net.FileListener(f); err == nil { + current, ok := listeners[f.Name()] + if !ok { + listeners[f.Name()] = []net.Listener{pc} + } else { + listeners[f.Name()] = append(current, pc) + } + f.Close() + } + } + return listeners, nil +} + +// TLSListeners returns a slice containing a net.listener for each matching TCP socket type +// passed to this process. +// It uses default Listeners func and forces TCP sockets handlers to use TLS based on tlsConfig. +func TLSListeners(tlsConfig *tls.Config) ([]net.Listener, error) { + listeners, err := Listeners() + + if listeners == nil || err != nil { + return nil, err + } + + if tlsConfig != nil && err == nil { + for i, l := range listeners { + // Activate TLS only for TCP sockets + if l.Addr().Network() == "tcp" { + listeners[i] = tls.NewListener(l, tlsConfig) + } + } + } + + return listeners, err +} + +// TLSListenersWithNames maps a listener name to a net.Listener with +// the associated TLS configuration. +func TLSListenersWithNames(tlsConfig *tls.Config) (map[string][]net.Listener, error) { + listeners, err := ListenersWithNames() + + if listeners == nil || err != nil { + return nil, err + } + + if tlsConfig != nil && err == nil { + for _, ll := range listeners { + // Activate TLS only for TCP sockets + for i, l := range ll { + if l.Addr().Network() == "tcp" { + ll[i] = tls.NewListener(l, tlsConfig) + } + } + } + } + + return listeners, err +} diff --git a/vendor/github.com/coreos/go-systemd/activation/packetconns.go b/vendor/github.com/coreos/go-systemd/activation/packetconns.go new file mode 100644 index 000000000..a97206785 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/activation/packetconns.go @@ -0,0 +1,38 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import ( + "net" +) + +// PacketConns returns a slice containing a net.PacketConn for each matching socket type +// passed to this process. +// +// The order of the file descriptors is preserved in the returned slice. +// Nil values are used to fill any gaps. For example if systemd were to return file descriptors +// corresponding with "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn} +func PacketConns() ([]net.PacketConn, error) { + files := Files(true) + conns := make([]net.PacketConn, len(files)) + + for i, f := range files { + if pc, err := net.FilePacketConn(f); err == nil { + conns[i] = pc + f.Close() + } + } + return conns, nil +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go new file mode 100644 index 000000000..f652582e6 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/dbus.go @@ -0,0 +1,240 @@ +// Copyright 2015 CoreOS, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ +package dbus + +import ( + "encoding/hex" + "fmt" + "os" + "strconv" + "strings" + "sync" + + "github.com/godbus/dbus" +) + +const ( + alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` + num = `0123456789` + alphanum = alpha + num + signalBuffer = 100 +) + +// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped +func needsEscape(i int, b byte) bool { + // Escape everything that is not a-z-A-Z-0-9 + // Also escape 0-9 if it's the first character + return strings.IndexByte(alphanum, b) == -1 || + (i == 0 && strings.IndexByte(num, b) != -1) +} + +// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the +// rules that systemd uses for serializing special characters. +func PathBusEscape(path string) string { + // Special case the empty string + if len(path) == 0 { + return "_" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if needsEscape(i, c) { + e := fmt.Sprintf("_%x", c) + n = append(n, []byte(e)...) + } else { + n = append(n, c) + } + } + return string(n) +} + +// pathBusUnescape is the inverse of PathBusEscape. +func pathBusUnescape(path string) string { + if path == "_" { + return "" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if c == '_' && i+2 < len(path) { + res, err := hex.DecodeString(path[i+1 : i+3]) + if err == nil { + n = append(n, res...) + } + i += 2 + } else { + n = append(n, c) + } + } + return string(n) +} + +// Conn is a connection to systemd's dbus endpoint. +type Conn struct { + // sysconn/sysobj are only used to call dbus methods + sysconn *dbus.Conn + sysobj dbus.BusObject + + // sigconn/sigobj are only used to receive dbus signals + sigconn *dbus.Conn + sigobj dbus.BusObject + + jobListener struct { + jobs map[dbus.ObjectPath]chan<- string + sync.Mutex + } + subStateSubscriber struct { + updateCh chan<- *SubStateUpdate + errCh chan<- error + sync.Mutex + ignore map[dbus.ObjectPath]int64 + cleanIgnore int64 + } + propertiesSubscriber struct { + updateCh chan<- *PropertiesUpdate + errCh chan<- error + sync.Mutex + } +} + +// New establishes a connection to any available bus and authenticates. +// Callers should call Close() when done with the connection. +func New() (*Conn, error) { + conn, err := NewSystemConnection() + if err != nil && os.Geteuid() == 0 { + return NewSystemdConnection() + } + return conn, err +} + +// NewSystemConnection establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection +func NewSystemConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SystemBusPrivate) + }) +} + +// NewUserConnection establishes a connection to the session bus and +// authenticates. This can be used to connect to systemd user instances. 
+// Callers should call Close() when done with the connection. +func NewUserConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SessionBusPrivate) + }) +} + +// NewSystemdConnection establishes a private, direct connection to systemd. +// This can be used for communicating with systemd without a dbus daemon. +// Callers should call Close() when done with the connection. +func NewSystemdConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + // We skip Hello when talking directly to systemd. + return dbusAuthConnection(func(opts ...dbus.ConnOption) (*dbus.Conn, error) { + return dbus.Dial("unix:path=/run/systemd/private") + }) + }) +} + +// Close closes an established connection +func (c *Conn) Close() { + c.sysconn.Close() + c.sigconn.Close() +} + +// NewConnection establishes a connection to a bus using a caller-supplied function. +// This allows connecting to remote buses through a user-supplied mechanism. +// The supplied function may be called multiple times, and should return independent connections. +// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, +// and any authentication should be handled by the function. +func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { + sysconn, err := dialBus() + if err != nil { + return nil, err + } + + sigconn, err := dialBus() + if err != nil { + sysconn.Close() + return nil, err + } + + c := &Conn{ + sysconn: sysconn, + sysobj: systemdObject(sysconn), + sigconn: sigconn, + sigobj: systemdObject(sigconn), + } + + c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64) + c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) + + // Setup the listeners on jobs so that we can get completions + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + + c.dispatch() + return c, nil +} + +// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager +// interface. The value is returned in its string representation, as defined at +// https://developer.gnome.org/glib/unstable/gvariant-text.html +func (c *Conn) GetManagerProperty(prop string) (string, error) { + variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." 
+ prop) + if err != nil { + return "", err + } + return variant.String(), nil +} + +func dbusAuthConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus() + if err != nil { + return nil, err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = conn.Auth(methods) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func dbusAuthHelloConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := dbusAuthConnection(createBus) + if err != nil { + return nil, err + } + + if err = conn.Hello(); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func systemdObject(conn *dbus.Conn) dbus.BusObject { + return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go new file mode 100644 index 000000000..6a0aa6569 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/methods.go @@ -0,0 +1,594 @@ +// Copyright 2015, 2018 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "errors" + "fmt" + "path" + "strconv" + + "github.com/godbus/dbus" +) + +func (c *Conn) jobComplete(signal *dbus.Signal) { + var id uint32 + var job dbus.ObjectPath + var unit string + var result string + dbus.Store(signal.Body, &id, &job, &unit, &result) + c.jobListener.Lock() + out, ok := c.jobListener.jobs[job] + if ok { + out <- result + delete(c.jobListener.jobs, job) + } + c.jobListener.Unlock() +} + +func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) { + if ch != nil { + c.jobListener.Lock() + defer c.jobListener.Unlock() + } + + var p dbus.ObjectPath + err := c.sysobj.Call(job, 0, args...).Store(&p) + if err != nil { + return 0, err + } + + if ch != nil { + c.jobListener.jobs[p] = ch + } + + // ignore error since 0 is fine if conversion fails + jobID, _ := strconv.Atoi(path.Base(string(p))) + + return jobID, nil +} + +// StartUnit enqueues a start job and depending jobs, if any (unless otherwise +// specified by the mode string). +// +// Takes the unit to activate, plus a mode string. The mode needs to be one of +// replace, fail, isolate, ignore-dependencies, ignore-requirements. If +// "replace" the call will start the unit and its dependencies, possibly +// replacing already queued jobs that conflict with this. If "fail" the call +// will start the unit and its dependencies, but will fail if this would change +// an already queued job. If "isolate" the call will start the unit in question +// and terminate all units that aren't dependencies of it. If +// "ignore-dependencies" it will start a unit but ignore all its dependencies. 
+// If "ignore-requirements" it will start a unit but only ignore the +// requirement dependencies. It is not recommended to make use of the latter +// two options. +// +// If the provided channel is non-nil, a result string will be sent to it upon +// job completion: one of done, canceled, timeout, failed, dependency, skipped. +// done indicates successful execution of a job. canceled indicates that a job +// has been canceled before it finished execution. timeout indicates that the +// job timeout was reached. failed indicates that the job failed. dependency +// indicates that a job this job has been depending on failed and the job hence +// has been removed too. skipped indicates that a job was skipped because it +// didn't apply to the units current state. +// +// If no error occurs, the ID of the underlying systemd job will be returned. There +// does exist the possibility for no error to be returned, but for the returned job +// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint +// should not be considered authoritative. +// +// If an error does occur, it will be returned to the user alongside a job ID of 0. +func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) +} + +// StopUnit is similar to StartUnit but stops the specified unit rather +// than starting it. +func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) +} + +// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise. +func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) +} + +// RestartUnit restarts a service. If a service is restarted that isn't +// running it will be started. +func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) +} + +// TryRestartUnit is like RestartUnit, except that a service that isn't running +// is not affected by the restart. +func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) +} + +// ReloadOrRestart attempts a reload if the unit supports it and use a restart +// otherwise. +func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) +} + +// ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try" +// flavored restart otherwise. +func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) +} + +// StartTransientUnit() may be used to create and start a transient unit, which +// will be released as soon as it is not running or referenced anymore or the +// system is rebooted. name is the unit name including suffix, and must be +// unique. mode is the same as in StartUnit(), properties contains properties +// of the unit. 
+func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) +} + +// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's +// processes are killed. +func (c *Conn) KillUnit(name string, signal int32) { + c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store() +} + +// ResetFailedUnit resets the "failed" state of a specific unit. +func (c *Conn) ResetFailedUnit(name string) error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() +} + +// SystemState returns the systemd state. Equivalent to `systemctl is-system-running`. +func (c *Conn) SystemState() (*Property, error) { + var err error + var prop dbus.Variant + + obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") + err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: "SystemState", Value: prop}, nil +} + +// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface +func (c *Conn) getProperties(path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { + var err error + var props map[string]dbus.Variant + + if !path.IsValid() { + return nil, fmt.Errorf("invalid unit name: %v", path) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) + if err != nil { + return nil, err + } + + out := make(map[string]interface{}, len(props)) + for k, v := range props { + out[k] = v.Value() + } + + return out, nil +} + +// GetUnitProperties takes the (unescaped) unit name and returns all of its dbus object properties. +func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(path, "org.freedesktop.systemd1.Unit") +} + +// GetUnitProperties takes the (escaped) unit path and returns all of its dbus object properties. +func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { + return c.getProperties(path, "org.freedesktop.systemd1.Unit") +} + +func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) { + var err error + var prop dbus.Variant + + path := unitPath(unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: propertyName, Value: prop}, nil +} + +func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName) +} + +// GetServiceProperty returns property for given service name and property name +func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) { + return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName) +} + +// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. 
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope +// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit +func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(path, "org.freedesktop.systemd1."+unitType) +} + +// SetUnitProperties() may be used to modify certain unit properties at runtime. +// Not all properties may be changed at runtime, but many resource management +// settings (primarily those in systemd.cgroup(5)) may. The changes are applied +// instantly, and stored on disk for future boots, unless runtime is true, in which +// case the settings only apply until the next reboot. name is the name of the unit +// to modify. properties are the settings to set, encoded as an array of property +// name and value pairs. +func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() +} + +func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName) +} + +type UnitStatus struct { + Name string // The primary unit name as string + Description string // The human readable description string + LoadState string // The load state (i.e. whether the unit file has been loaded successfully) + ActiveState string // The active state (i.e. whether the unit is currently started or not) + SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) + Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. + Path dbus.ObjectPath // The unit object path + JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise + JobType string // The job type as string + JobPath dbus.ObjectPath // The job object path +} + +type storeFunc func(retvalues ...interface{}) error + +func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { + result := make([][]interface{}, 0) + err := f(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]UnitStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + err = dbus.Store(resultInterface, statusInterface...) + if err != nil { + return nil, err + } + + return status, nil +} + +// ListUnits returns an array with all currently loaded units. Note that +// units may be known by multiple names at the same time, and hence there might +// be more unit names loaded than actual units behind them. +// Also note that a unit is only loaded if it is active and/or enabled. +// Units that are both disabled and inactive will thus not be returned. +func (c *Conn) ListUnits() ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store) +} + +// ListUnitsFiltered returns an array with units filtered by state. +// It takes a list of units' statuses to filter. 
+func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) +} + +// ListUnitsByPatterns returns an array with units. +// It takes a list of units' statuses and names to filter. +// Note that units may be known by multiple names at the same time, +// and hence there might be more unit names loaded than actual units behind them. +func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) +} + +// ListUnitsByNames returns an array with units. It takes a list of units' +// names and returns an UnitStatus array. Comparing to ListUnitsByPatterns +// method, this method returns statuses even for inactive or non-existing +// units. Input array should contain exact unit names, but not patterns. +// Note: Requires systemd v230 or higher +func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) +} + +type UnitFile struct { + Path string + Type string +} + +func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { + result := make([][]interface{}, 0) + err := f(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + files := make([]UnitFile, len(result)) + fileInterface := make([]interface{}, len(files)) + for i := range files { + fileInterface[i] = &files[i] + } + + err = dbus.Store(resultInterface, fileInterface...) + if err != nil { + return nil, err + } + + return files, nil +} + +// ListUnitFiles returns an array of all available units on disk. +func (c *Conn) ListUnitFiles() ([]UnitFile, error) { + return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) +} + +// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns. +func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) { + return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) +} + +type LinkUnitFileChange EnableUnitFileChange + +// LinkUnitFiles() links unit files (that are located outside of the +// usual unit search paths) into the unit search path. +// +// It takes a list of absolute paths to unit files to link and two +// booleans. The first boolean controls whether the unit shall be +// enabled for runtime only (true, /run), or persistently (false, +// /etc). +// The second controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns a list of the changes made. The list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. 
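A minimal sketch of the read-only side of the API above: enumerating loaded units and reading the properties of one of them. The unit name "dbus.service" and the "Description" lookup are illustrative.

package main

import (
    "fmt"
    "log"

    systemddbus "github.com/coreos/go-systemd/dbus"
)

func main() {
    conn, err := systemddbus.New()
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    units, err := conn.ListUnits()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%d units loaded\n", len(units))

    // Properties come back as a map of property name to decoded value.
    props, err := conn.GetUnitProperties("dbus.service")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("Description:", props["Description"])
}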
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]LinkUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +// EnableUnitFiles() may be used to enable one or more units in the system (by +// creating symlinks to them in /etc or /run). +// +// It takes a list of unit files to enable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and two booleans: the first controls whether the unit shall +// be enabled for runtime only (true, /run), or persistently (false, /etc). +// The second one controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns one boolean and an array with the changes made. The +// boolean signals whether the unit files contained any enablement +// information (i.e. an [Install]) section. The changes list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { + var carries_install_info bool + + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) + if err != nil { + return false, nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]EnableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return false, nil, err + } + + return carries_install_info, changes, nil +} + +type EnableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// DisableUnitFiles() may be used to disable one or more units in the system (by +// removing symlinks to them from /etc or /run). +// +// It takes a list of unit files to disable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and one boolean: whether the unit was enabled for runtime +// only (true, /run), or persistently (false, /etc). +// +// This call returns an array with the changes made. The changes list +// consists of structures with three strings: the type of the change (one of +// symlink or unlink), the file name of the symlink and the destination of the +// symlink. 
+func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]DisableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type DisableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// MaskUnitFiles masks one or more units in the system +// +// It takes three arguments: +// * list of units to mask (either just file names or full +// absolute paths if the unit files are residing outside +// the usual unit search paths) +// * runtime to specify whether the unit was enabled for runtime +// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) +// * force flag +func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]MaskUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type MaskUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// UnmaskUnitFiles unmasks one or more units in the system +// +// It takes two arguments: +// * list of unit files to mask (either just file names or full +// absolute paths if the unit files are residing outside +// the usual unit search paths) +// * runtime to specify whether the unit was enabled for runtime +// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) +func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]UnmaskUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) 
+ if err != nil { + return nil, err + } + + return changes, nil +} + +type UnmaskUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Reload instructs systemd to scan for and reload unit files. This is +// equivalent to a 'systemctl daemon-reload'. +func (c *Conn) Reload() error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() +} + +func unitPath(name string) dbus.ObjectPath { + return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) +} + +// unitName returns the unescaped base element of the supplied escaped path +func unitName(dpath dbus.ObjectPath) string { + return pathBusUnescape(path.Base(string(dpath))) +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go new file mode 100644 index 000000000..6c8189587 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/properties.go @@ -0,0 +1,237 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "github.com/godbus/dbus" +) + +// From the systemd docs: +// +// The properties array of StartTransientUnit() may take many of the settings +// that may also be configured in unit files. Not all parameters are currently +// accepted though, but we plan to cover more properties with future release. +// Currently you may set the Description, Slice and all dependency types of +// units, as well as RemainAfterExit, ExecStart for service units, +// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, +// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, +// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, +// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map +// directly to their counterparts in unit files and as normal D-Bus object +// properties. The exception here is the PIDs field of scope units which is +// used for construction of the scope only and specifies the initial PIDs to +// add to the scope object. + +type Property struct { + Name string + Value dbus.Variant +} + +type PropertyCollection struct { + Name string + Properties []Property +} + +type execStart struct { + Path string // the binary path to execute + Args []string // an array with all arguments to pass to the executed command, starting with argument 0 + UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly +} + +// PropExecStart sets the ExecStart service property. The first argument is a +// slice with the binary path to execute followed by the arguments to pass to +// the executed command. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= +func PropExecStart(command []string, uncleanIsFailure bool) Property { + execStarts := []execStart{ + execStart{ + Path: command[0], + Args: command, + UncleanIsFailure: uncleanIsFailure, + }, + } + + return Property{ + Name: "ExecStart", + Value: dbus.MakeVariant(execStarts), + } +} + +// PropRemainAfterExit sets the RemainAfterExit service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= +func PropRemainAfterExit(b bool) Property { + return Property{ + Name: "RemainAfterExit", + Value: dbus.MakeVariant(b), + } +} + +// PropType sets the Type service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type= +func PropType(t string) Property { + return Property{ + Name: "Type", + Value: dbus.MakeVariant(t), + } +} + +// PropDescription sets the Description unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= +func PropDescription(desc string) Property { + return Property{ + Name: "Description", + Value: dbus.MakeVariant(desc), + } +} + +func propDependency(name string, units []string) Property { + return Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + +// PropRequires sets the Requires unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= +func PropRequires(units ...string) Property { + return propDependency("Requires", units) +} + +// PropRequiresOverridable sets the RequiresOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= +func PropRequiresOverridable(units ...string) Property { + return propDependency("RequiresOverridable", units) +} + +// PropRequisite sets the Requisite unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= +func PropRequisite(units ...string) Property { + return propDependency("Requisite", units) +} + +// PropRequisiteOverridable sets the RequisiteOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= +func PropRequisiteOverridable(units ...string) Property { + return propDependency("RequisiteOverridable", units) +} + +// PropWants sets the Wants unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= +func PropWants(units ...string) Property { + return propDependency("Wants", units) +} + +// PropBindsTo sets the BindsTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= +func PropBindsTo(units ...string) Property { + return propDependency("BindsTo", units) +} + +// PropRequiredBy sets the RequiredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= +func PropRequiredBy(units ...string) Property { + return propDependency("RequiredBy", units) +} + +// PropRequiredByOverridable sets the RequiredByOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= +func PropRequiredByOverridable(units ...string) Property { + return propDependency("RequiredByOverridable", units) +} + +// PropWantedBy sets the WantedBy unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= +func PropWantedBy(units ...string) Property { + return propDependency("WantedBy", units) +} + +// PropBoundBy sets the BoundBy unit property. See +// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy= +func PropBoundBy(units ...string) Property { + return propDependency("BoundBy", units) +} + +// PropConflicts sets the Conflicts unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= +func PropConflicts(units ...string) Property { + return propDependency("Conflicts", units) +} + +// PropConflictedBy sets the ConflictedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= +func PropConflictedBy(units ...string) Property { + return propDependency("ConflictedBy", units) +} + +// PropBefore sets the Before unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= +func PropBefore(units ...string) Property { + return propDependency("Before", units) +} + +// PropAfter sets the After unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= +func PropAfter(units ...string) Property { + return propDependency("After", units) +} + +// PropOnFailure sets the OnFailure unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= +func PropOnFailure(units ...string) Property { + return propDependency("OnFailure", units) +} + +// PropTriggers sets the Triggers unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= +func PropTriggers(units ...string) Property { + return propDependency("Triggers", units) +} + +// PropTriggeredBy sets the TriggeredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= +func PropTriggeredBy(units ...string) Property { + return propDependency("TriggeredBy", units) +} + +// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= +func PropPropagatesReloadTo(units ...string) Property { + return propDependency("PropagatesReloadTo", units) +} + +// PropRequiresMountsFor sets the RequiresMountsFor unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= +func PropRequiresMountsFor(units ...string) Property { + return propDependency("RequiresMountsFor", units) +} + +// PropSlice sets the Slice unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= +func PropSlice(slice string) Property { + return Property{ + Name: "Slice", + Value: dbus.MakeVariant(slice), + } +} + +// PropPids sets the PIDs field of scope units used in the initial construction +// of the scope only and specifies the initial PIDs to add to the scope object. +// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties +func PropPids(pids ...uint32) Property { + return Property{ + Name: "PIDs", + Value: dbus.MakeVariant(pids), + } +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/set.go b/vendor/github.com/coreos/go-systemd/dbus/set.go new file mode 100644 index 000000000..17c5d4856 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/set.go @@ -0,0 +1,47 @@ +// Copyright 2015 CoreOS, Inc. 
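A minimal sketch combining the property helpers with StartTransientUnit; the unit name and command line are illustrative assumptions.

package main

import (
    "log"

    systemddbus "github.com/coreos/go-systemd/dbus"
)

func main() {
    conn, err := systemddbus.New()
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // Describe the transient unit with the exported Prop* helpers.
    props := []systemddbus.Property{
        systemddbus.PropDescription("transient sleep unit"),
        systemddbus.PropExecStart([]string{"/bin/sleep", "30"}, false),
    }

    done := make(chan string, 1)
    if _, err := conn.StartTransientUnit("sleep-demo.service", "replace", props, done); err != nil {
        log.Fatal(err)
    }
    log.Printf("transient unit result: %s", <-done)
}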
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +type set struct { + data map[string]bool +} + +func (s *set) Add(value string) { + s.data[value] = true +} + +func (s *set) Remove(value string) { + delete(s.data, value) +} + +func (s *set) Contains(value string) (exists bool) { + _, exists = s.data[value] + return +} + +func (s *set) Length() int { + return len(s.data) +} + +func (s *set) Values() (values []string) { + for val := range s.data { + values = append(values, val) + } + return +} + +func newSet() *set { + return &set{make(map[string]bool)} +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go new file mode 100644 index 000000000..70e63a6f1 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/subscription.go @@ -0,0 +1,333 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "errors" + "log" + "time" + + "github.com/godbus/dbus" +) + +const ( + cleanIgnoreInterval = int64(10 * time.Second) + ignoreInterval = int64(30 * time.Millisecond) +) + +// Subscribe sets up this connection to subscribe to all systemd dbus events. +// This is required before calling SubscribeUnits. When the connection closes +// systemd will automatically stop sending signals so there is no need to +// explicitly call Unsubscribe(). +func (c *Conn) Subscribe() error { + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") + + return c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() +} + +// Unsubscribe this connection from systemd dbus events. 
+func (c *Conn) Unsubscribe() error { + return c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() +} + +func (c *Conn) dispatch() { + ch := make(chan *dbus.Signal, signalBuffer) + + c.sigconn.Signal(ch) + + go func() { + for { + signal, ok := <-ch + if !ok { + return + } + + if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { + c.jobComplete(signal) + } + + if c.subStateSubscriber.updateCh == nil && + c.propertiesSubscriber.updateCh == nil { + continue + } + + var unitPath dbus.ObjectPath + switch signal.Name { + case "org.freedesktop.systemd1.Manager.JobRemoved": + unitName := signal.Body[2].(string) + c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) + case "org.freedesktop.systemd1.Manager.UnitNew": + unitPath = signal.Body[1].(dbus.ObjectPath) + case "org.freedesktop.DBus.Properties.PropertiesChanged": + if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { + unitPath = signal.Path + + if len(signal.Body) >= 2 { + if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok { + c.sendPropertiesUpdate(unitPath, changed) + } + } + } + } + + if unitPath == dbus.ObjectPath("") { + continue + } + + c.sendSubStateUpdate(unitPath) + } + }() +} + +// Returns two unbuffered channels which will receive all changed units every +// interval. Deleted units are sent as nil. +func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) +} + +// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer +// size of the channels, the comparison function for detecting changes and a filter +// function for cutting down on the noise that your channel receives. +func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { + old := make(map[string]*UnitStatus) + statusChan := make(chan map[string]*UnitStatus, buffer) + errChan := make(chan error, buffer) + + go func() { + for { + timerChan := time.After(interval) + + units, err := c.ListUnits() + if err == nil { + cur := make(map[string]*UnitStatus) + for i := range units { + if filterUnit != nil && filterUnit(units[i].Name) { + continue + } + cur[units[i].Name] = &units[i] + } + + // add all new or changed units + changed := make(map[string]*UnitStatus) + for n, u := range cur { + if oldU, ok := old[n]; !ok || isChanged(oldU, u) { + changed[n] = u + } + delete(old, n) + } + + // add all deleted units + for oldN := range old { + changed[oldN] = nil + } + + old = cur + + if len(changed) != 0 { + statusChan <- changed + } + } else { + errChan <- err + } + + <-timerChan + } + }() + + return statusChan, errChan +} + +type SubStateUpdate struct { + UnitName string + SubState string +} + +// SetSubStateSubscriber writes to updateCh when any unit's substate changes. +// Although this writes to updateCh on every state change, the reported state +// may be more recent than the change that generated it (due to an unavoidable +// race in the systemd dbus interface). That is, this method provides a good +// way to keep a current view of all units' states, but is not guaranteed to +// show every state transition they go through. Furthermore, state changes +// will only be written to the channel with non-blocking writes. 
If updateCh +// is full, it attempts to write an error to errCh; if errCh is full, the error +// passes silently. +func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { + if c == nil { + msg := "nil receiver" + select { + case errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } + + c.subStateSubscriber.Lock() + defer c.subStateSubscriber.Unlock() + c.subStateSubscriber.updateCh = updateCh + c.subStateSubscriber.errCh = errCh +} + +func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) { + c.subStateSubscriber.Lock() + defer c.subStateSubscriber.Unlock() + + if c.subStateSubscriber.updateCh == nil { + return + } + + isIgnored := c.shouldIgnore(unitPath) + defer c.cleanIgnore() + if isIgnored { + return + } + + info, err := c.GetUnitPathProperties(unitPath) + if err != nil { + select { + case c.subStateSubscriber.errCh <- err: + default: + log.Printf("full error channel while reporting: %s\n", err) + } + return + } + defer c.updateIgnore(unitPath, info) + + name, ok := info["Id"].(string) + if !ok { + msg := "failed to cast info.Id" + select { + case c.subStateSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", err) + } + return + } + substate, ok := info["SubState"].(string) + if !ok { + msg := "failed to cast info.SubState" + select { + case c.subStateSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } + + update := &SubStateUpdate{name, substate} + select { + case c.subStateSubscriber.updateCh <- update: + default: + msg := "update channel is full" + select { + case c.subStateSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } +} + +// The ignore functions work around a wart in the systemd dbus interface. +// Requesting the properties of an unloaded unit will cause systemd to send a +// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's +// properties on UnitNew (as that's the only indication of a new unit coming up +// for the first time), we would enter an infinite loop if we did not attempt +// to detect and ignore these spurious signals. The signal themselves are +// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an +// unloaded unit's signals for a short time after requesting its properties. +// This means that we will miss e.g. a transient unit being restarted +// *immediately* upon failure and also a transient unit being started +// immediately after requesting its status (with systemctl status, for example, +// because this causes a UnitNew signal to be sent which then causes us to fetch +// the properties). 
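A minimal sketch of consuming the subscription machinery above: enabling signal delivery and polling for changed units once per second. The logging format is illustrative.

package main

import (
    "log"
    "time"

    systemddbus "github.com/coreos/go-systemd/dbus"
)

func main() {
    conn, err := systemddbus.New()
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // Required before SubscribeUnits so systemd emits the relevant signals.
    if err := conn.Subscribe(); err != nil {
        log.Fatal(err)
    }

    updates, errs := conn.SubscribeUnits(time.Second)
    for {
        select {
        case changed := <-updates:
            for name, status := range changed {
                if status == nil { // deleted units are reported as nil
                    log.Printf("%s: removed", name)
                    continue
                }
                log.Printf("%s: %s/%s", name, status.ActiveState, status.SubState)
            }
        case err := <-errs:
            log.Printf("subscription error: %v", err)
        }
    }
}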
+ +func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { + t, ok := c.subStateSubscriber.ignore[path] + return ok && t >= time.Now().UnixNano() +} + +func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { + loadState, ok := info["LoadState"].(string) + if !ok { + return + } + + // unit is unloaded - it will trigger bad systemd dbus behavior + if loadState == "not-found" { + c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval + } +} + +// without this, ignore would grow unboundedly over time +func (c *Conn) cleanIgnore() { + now := time.Now().UnixNano() + if c.subStateSubscriber.cleanIgnore < now { + c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval + + for p, t := range c.subStateSubscriber.ignore { + if t < now { + delete(c.subStateSubscriber.ignore, p) + } + } + } +} + +// PropertiesUpdate holds a map of a unit's changed properties +type PropertiesUpdate struct { + UnitName string + Changed map[string]dbus.Variant +} + +// SetPropertiesSubscriber writes to updateCh when any unit's properties +// change. Every property change reported by systemd will be sent; that is, no +// transitions will be "missed" (as they might be with SetSubStateSubscriber). +// However, state changes will only be written to the channel with non-blocking +// writes. If updateCh is full, it attempts to write an error to errCh; if +// errCh is full, the error passes silently. +func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) { + c.propertiesSubscriber.Lock() + defer c.propertiesSubscriber.Unlock() + c.propertiesSubscriber.updateCh = updateCh + c.propertiesSubscriber.errCh = errCh +} + +// we don't need to worry about shouldIgnore() here because +// sendPropertiesUpdate doesn't call GetProperties() +func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) { + c.propertiesSubscriber.Lock() + defer c.propertiesSubscriber.Unlock() + + if c.propertiesSubscriber.updateCh == nil { + return + } + + update := &PropertiesUpdate{unitName(unitPath), changedProps} + + select { + case c.propertiesSubscriber.updateCh <- update: + default: + msg := "update channel is full" + select { + case c.propertiesSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go new file mode 100644 index 000000000..5b408d584 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go @@ -0,0 +1,57 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "time" +) + +// SubscriptionSet returns a subscription set which is like conn.Subscribe but +// can filter to only return events for a set of units. 
+type SubscriptionSet struct { + *set + conn *Conn +} + +func (s *SubscriptionSet) filter(unit string) bool { + return !s.Contains(unit) +} + +// Subscribe starts listening for dbus events for all of the units in the set. +// Returns channels identical to conn.SubscribeUnits. +func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { + // TODO: Make fully evented by using systemd 209 with properties changed values + return s.conn.SubscribeUnitsCustom(time.Second, 0, + mismatchUnitStatus, + func(unit string) bool { return s.filter(unit) }, + ) +} + +// NewSubscriptionSet returns a new subscription set. +func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { + return &SubscriptionSet{newSet(), conn} +} + +// mismatchUnitStatus returns true if the provided UnitStatus objects +// are not equivalent. false is returned if the objects are equivalent. +// Only the Name, Description and state-related fields are used in +// the comparison. +func mismatchUnitStatus(u1, u2 *UnitStatus) bool { + return u1.Name != u2.Name || + u1.Description != u2.Description || + u1.LoadState != u2.LoadState || + u1.ActiveState != u2.ActiveState || + u1.SubState != u2.SubState +} diff --git a/vendor/github.com/coreos/go-systemd/util/util.go b/vendor/github.com/coreos/go-systemd/util/util.go new file mode 100644 index 000000000..7828ce6f0 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/util/util.go @@ -0,0 +1,90 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package util contains utility functions related to systemd that applications +// can use to check things like whether systemd is running. Note that some of +// these functions attempt to manually load systemd libraries at runtime rather +// than linking against them. +package util + +import ( + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + ErrNoCGO = fmt.Errorf("go-systemd built with CGO disabled") +) + +// GetRunningSlice attempts to retrieve the name of the systemd slice in which +// the current process is running. +// This function is a wrapper around the libsystemd C library; if it cannot be +// opened, an error is returned. +func GetRunningSlice() (string, error) { + return getRunningSlice() +} + +// RunningFromSystemService tries to detect whether the current process has +// been invoked from a system service. The condition for this is whether the +// process is _not_ a user process. User processes are those running in session +// scopes or under per-user `systemd --user` instances. +// +// To avoid false positives on systems without `pam_systemd` (which is +// responsible for creating user sessions), this function also uses a heuristic +// to detect whether it's being invoked from a session leader process. This is +// the case if the current process is executed directly from a service file +// (e.g. with `ExecStart=/this/cmd`). 
Note that this heuristic will fail if the +// command is instead launched in a subshell or similar so that it is not +// session leader (e.g. `ExecStart=/bin/bash -c "/this/cmd"`) +// +// This function is a wrapper around the libsystemd C library; if this is +// unable to successfully open a handle to the library for any reason (e.g. it +// cannot be found), an error will be returned. +func RunningFromSystemService() (bool, error) { + return runningFromSystemService() +} + +// CurrentUnitName attempts to retrieve the name of the systemd system unit +// from which the calling process has been invoked. It wraps the systemd +// `sd_pid_get_unit` call, with the same caveat: for processes not part of a +// systemd system unit, this function will return an error. +func CurrentUnitName() (string, error) { + return currentUnitName() +} + +// IsRunningSystemd checks whether the host was booted with systemd as its init +// system. This functions similarly to systemd's `sd_booted(3)`: internally, it +// checks whether /run/systemd/system/ exists and is a directory. +// http://www.freedesktop.org/software/systemd/man/sd_booted.html +func IsRunningSystemd() bool { + fi, err := os.Lstat("/run/systemd/system") + if err != nil { + return false + } + return fi.IsDir() +} + +// GetMachineID returns a host's 128-bit machine ID as a string. This functions +// similarly to systemd's `sd_id128_get_machine`: internally, it simply reads +// the contents of /etc/machine-id +// http://www.freedesktop.org/software/systemd/man/sd_id128_get_machine.html +func GetMachineID() (string, error) { + machineID, err := ioutil.ReadFile("/etc/machine-id") + if err != nil { + return "", fmt.Errorf("failed to read /etc/machine-id: %v", err) + } + return strings.TrimSpace(string(machineID)), nil +} diff --git a/vendor/github.com/coreos/go-systemd/util/util_cgo.go b/vendor/github.com/coreos/go-systemd/util/util_cgo.go new file mode 100644 index 000000000..6269bc732 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/util/util_cgo.go @@ -0,0 +1,175 @@ +// Copyright 2016 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
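A minimal sketch of the util helpers defined above for probing the host; it only exercises the pure-Go functions, so it works whether or not the cgo-backed variants below are compiled in.

package main

import (
    "fmt"
    "log"

    "github.com/coreos/go-systemd/util"
)

func main() {
    if !util.IsRunningSystemd() {
        log.Fatal("host was not booted with systemd")
    }
    id, err := util.GetMachineID()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("machine-id:", id)
}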
+ +// +build cgo + +package util + +// #include +// #include +// #include +// +// int +// my_sd_pid_get_owner_uid(void *f, pid_t pid, uid_t *uid) +// { +// int (*sd_pid_get_owner_uid)(pid_t, uid_t *); +// +// sd_pid_get_owner_uid = (int (*)(pid_t, uid_t *))f; +// return sd_pid_get_owner_uid(pid, uid); +// } +// +// int +// my_sd_pid_get_unit(void *f, pid_t pid, char **unit) +// { +// int (*sd_pid_get_unit)(pid_t, char **); +// +// sd_pid_get_unit = (int (*)(pid_t, char **))f; +// return sd_pid_get_unit(pid, unit); +// } +// +// int +// my_sd_pid_get_slice(void *f, pid_t pid, char **slice) +// { +// int (*sd_pid_get_slice)(pid_t, char **); +// +// sd_pid_get_slice = (int (*)(pid_t, char **))f; +// return sd_pid_get_slice(pid, slice); +// } +// +// int +// am_session_leader() +// { +// return (getsid(0) == getpid()); +// } +import "C" +import ( + "fmt" + "syscall" + "unsafe" + + "github.com/coreos/pkg/dlopen" +) + +var libsystemdNames = []string{ + // systemd < 209 + "libsystemd-login.so.0", + "libsystemd-login.so", + + // systemd >= 209 merged libsystemd-login into libsystemd proper + "libsystemd.so.0", + "libsystemd.so", +} + +func getRunningSlice() (slice string, err error) { + var h *dlopen.LibHandle + h, err = dlopen.GetHandle(libsystemdNames) + if err != nil { + return + } + defer func() { + if err1 := h.Close(); err1 != nil { + err = err1 + } + }() + + sd_pid_get_slice, err := h.GetSymbolPointer("sd_pid_get_slice") + if err != nil { + return + } + + var s string + sl := C.CString(s) + defer C.free(unsafe.Pointer(sl)) + + ret := C.my_sd_pid_get_slice(sd_pid_get_slice, 0, &sl) + if ret < 0 { + err = fmt.Errorf("error calling sd_pid_get_slice: %v", syscall.Errno(-ret)) + return + } + + return C.GoString(sl), nil +} + +func runningFromSystemService() (ret bool, err error) { + var h *dlopen.LibHandle + h, err = dlopen.GetHandle(libsystemdNames) + if err != nil { + return + } + defer func() { + if err1 := h.Close(); err1 != nil { + err = err1 + } + }() + + sd_pid_get_owner_uid, err := h.GetSymbolPointer("sd_pid_get_owner_uid") + if err != nil { + return + } + + var uid C.uid_t + errno := C.my_sd_pid_get_owner_uid(sd_pid_get_owner_uid, 0, &uid) + serrno := syscall.Errno(-errno) + // when we're running from a unit file, sd_pid_get_owner_uid returns + // ENOENT (systemd <220), ENXIO (systemd 220-223), or ENODATA + // (systemd >=234) + switch { + case errno >= 0: + ret = false + case serrno == syscall.ENOENT, serrno == syscall.ENXIO, serrno == syscall.ENODATA: + // Since the implementation of sessions in systemd relies on + // the `pam_systemd` module, using the sd_pid_get_owner_uid + // heuristic alone can result in false positives if that module + // (or PAM itself) is not present or properly configured on the + // system. As such, we also check if we're the session leader, + // which should be the case if we're invoked from a unit file, + // but not if e.g. 
we're invoked from the command line from a + // user's login session + ret = C.am_session_leader() == 1 + default: + err = fmt.Errorf("error calling sd_pid_get_owner_uid: %v", syscall.Errno(-errno)) + } + return +} + +func currentUnitName() (unit string, err error) { + var h *dlopen.LibHandle + h, err = dlopen.GetHandle(libsystemdNames) + if err != nil { + return + } + defer func() { + if err1 := h.Close(); err1 != nil { + err = err1 + } + }() + + sd_pid_get_unit, err := h.GetSymbolPointer("sd_pid_get_unit") + if err != nil { + return + } + + var s string + u := C.CString(s) + defer C.free(unsafe.Pointer(u)) + + ret := C.my_sd_pid_get_unit(sd_pid_get_unit, 0, &u) + if ret < 0 { + err = fmt.Errorf("error calling sd_pid_get_unit: %v", syscall.Errno(-ret)) + return + } + + unit = C.GoString(u) + return +} diff --git a/vendor/github.com/coreos/go-systemd/util/util_stub.go b/vendor/github.com/coreos/go-systemd/util/util_stub.go new file mode 100644 index 000000000..477589e12 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/util/util_stub.go @@ -0,0 +1,23 @@ +// Copyright 2016 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !cgo + +package util + +func getRunningSlice() (string, error) { return "", ErrNoCGO } + +func runningFromSystemService() (bool, error) { return false, ErrNoCGO } + +func currentUnitName() (string, error) { return "", ErrNoCGO } diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE new file mode 100644 index 000000000..e06d20818 --- /dev/null +++ b/vendor/github.com/coreos/pkg/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE new file mode 100644 index 000000000..b39ddfa5c --- /dev/null +++ b/vendor/github.com/coreos/pkg/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/pkg/dlopen/dlopen.go b/vendor/github.com/coreos/pkg/dlopen/dlopen.go new file mode 100644 index 000000000..23774f612 --- /dev/null +++ b/vendor/github.com/coreos/pkg/dlopen/dlopen.go @@ -0,0 +1,82 @@ +// Copyright 2016 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dlopen provides some convenience functions to dlopen a library and +// get its symbols. +package dlopen + +// #cgo LDFLAGS: -ldl +// #include +// #include +import "C" +import ( + "errors" + "fmt" + "unsafe" +) + +var ErrSoNotFound = errors.New("unable to open a handle to the library") + +// LibHandle represents an open handle to a library (.so) +type LibHandle struct { + Handle unsafe.Pointer + Libname string +} + +// GetHandle tries to get a handle to a library (.so), attempting to access it +// by the names specified in libs and returning the first that is successfully +// opened. Callers are responsible for closing the handler. If no library can +// be successfully opened, an error is returned. +func GetHandle(libs []string) (*LibHandle, error) { + for _, name := range libs { + libname := C.CString(name) + defer C.free(unsafe.Pointer(libname)) + handle := C.dlopen(libname, C.RTLD_LAZY) + if handle != nil { + h := &LibHandle{ + Handle: handle, + Libname: name, + } + return h, nil + } + } + return nil, ErrSoNotFound +} + +// GetSymbolPointer takes a symbol name and returns a pointer to the symbol. +func (l *LibHandle) GetSymbolPointer(symbol string) (unsafe.Pointer, error) { + sym := C.CString(symbol) + defer C.free(unsafe.Pointer(sym)) + + C.dlerror() + p := C.dlsym(l.Handle, sym) + e := C.dlerror() + if e != nil { + return nil, fmt.Errorf("error resolving symbol %q: %v", symbol, errors.New(C.GoString(e))) + } + + return p, nil +} + +// Close closes a LibHandle. +func (l *LibHandle) Close() error { + C.dlerror() + C.dlclose(l.Handle) + e := C.dlerror() + if e != nil { + return fmt.Errorf("error closing %v: %v", l.Libname, errors.New(C.GoString(e))) + } + + return nil +} diff --git a/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go b/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go new file mode 100644 index 000000000..48a660104 --- /dev/null +++ b/vendor/github.com/coreos/pkg/dlopen/dlopen_example.go @@ -0,0 +1,56 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
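The dlopen helpers above can also be used without a C trampoline when only symbol presence matters; a hypothetical feature probe (library names, symbol, and the probe function itself are assumptions, not part of this patch) could look like:

package main

import (
	"fmt"

	"github.com/coreos/pkg/dlopen"
)

// hasSymbol reports whether any of the given libraries can be opened and
// exposes the named symbol. It never calls the symbol, so no cgo wrapper is
// needed on the caller's side.
func hasSymbol(libs []string, symbol string) (bool, error) {
	h, err := dlopen.GetHandle(libs)
	if err != nil {
		return false, err // dlopen.ErrSoNotFound if no library could be opened
	}
	defer h.Close()

	if _, err := h.GetSymbolPointer(symbol); err != nil {
		return false, nil // library opened, but the symbol is not exported
	}
	return true, nil
}

func main() {
	ok, err := hasSymbol(
		[]string{"libsystemd.so.0", "libsystemd.so", "libsystemd-login.so.0"},
		"sd_pid_get_unit",
	)
	if err != nil {
		fmt.Println("probe failed:", err)
		return
	}
	fmt.Println("sd_pid_get_unit available:", ok)
}

Actually calling through the resolved pointer still requires a small cgo shim, which is exactly what the dlopen_example.go file that follows demonstrates with strlen.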
+// +// +build linux + +package dlopen + +// #include +// #include +// +// int +// my_strlen(void *f, const char *s) +// { +// size_t (*strlen)(const char *); +// +// strlen = (size_t (*)(const char *))f; +// return strlen(s); +// } +import "C" + +import ( + "fmt" + "unsafe" +) + +func strlen(libs []string, s string) (int, error) { + h, err := GetHandle(libs) + if err != nil { + return -1, fmt.Errorf(`couldn't get a handle to the library: %v`, err) + } + defer h.Close() + + f := "strlen" + cs := C.CString(s) + defer C.free(unsafe.Pointer(cs)) + + strlen, err := h.GetSymbolPointer(f) + if err != nil { + return -1, fmt.Errorf(`couldn't get symbol %q: %v`, f, err) + } + + len := C.my_strlen(strlen, cs) + + return int(len), nil +} diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS index bc1edce71..e29faa19f 100644 --- a/vendor/github.com/docker/cli/AUTHORS +++ b/vendor/github.com/docker/cli/AUTHORS @@ -8,6 +8,7 @@ Aaron.L.Xu Abdur Rehman Abhinandan Prativadi Abin Shahab +Ace Tang Addam Hardy Adolfo Ochagavía Adrien Duermael @@ -23,6 +24,7 @@ Albert Callarisa Aleksa Sarai Alessandro Boch Alex Mavrogiannis +Alex Mayer Alexander Boyd Alexander Larsson Alexander Morozov @@ -37,6 +39,7 @@ Amir Goldstein Amit Krishnan Amit Shukla Amy Lindburg +Anda Xu Andrea Luzzardi Andreas Köhler Andrew France @@ -50,6 +53,7 @@ Andy Goldstein Andy Rothfusz Anil Madhavapeddy Ankush Agarwal +Anne Henmi Anton Polonskiy Antonio Murdaca Antonis Kalipetis @@ -65,6 +69,7 @@ BastianHofmann Ben Bonnefoy Ben Firshman Benjamin Boudreau +Benoit Sigoure Bhumika Bayani Bill Wang Bin Liu @@ -73,6 +78,7 @@ Boaz Shuster Bogdan Anton Boris Pruessmann Bradley Cicenas +Brandon Mitchell Brandon Philips Brent Salisbury Bret Fisher @@ -89,6 +95,7 @@ Carlos Alexandro Becker Ce Gao Cedric Davies Cezar Sa Espinola +Chad Faragher Chao Wang Charles Chan Charles Law @@ -109,8 +116,9 @@ Christian Stefanescu Christophe Robin Christophe Vidal Christopher Biscardi +Christopher Crone Christopher Jones -Christy Perez +Christy Norman Chun Chen Clinton Kitson Coenraad Loubser @@ -173,6 +181,7 @@ Dong Chen Doug Davis Drew Erny Ed Costello +Elango Sivanandam Eli Uriegas Eli Uriegas Elias Faxö @@ -183,7 +192,9 @@ Eric-Olivier Lamey Erica Windisch Erik Hollensbe Erik St. Martin +Essam A. Hassan Ethan Haynes +Euan Kemp Eugene Yakubovich Evan Allrich Evan Hazlett @@ -198,6 +209,7 @@ Flavio Crisciani Florian Klein Foysal Iqbal Fred Lifton +Frederic Hemberger Frederick F. Kautz IV Frederik Nordahl Jul Sabroe Frieder Bluemle @@ -215,6 +227,7 @@ Grant Reaber Greg Pflaum Guilhem Lettron Guillaume J. 
Charmes +Guillaume Le Floch gwx296173 Günther Jungbluth Hakan Özler @@ -239,12 +252,14 @@ Ignacio Capurro Ilya Dmitrichenko Ilya Khlopotov Ilya Sotkov +Ioan Eugen Stan Isabel Jimenez Ivan Grcic Ivan Markin Jacob Atzen Jacob Tomlinson Jaivish Kothari +Jake Lambert Jake Sanders James Nesbitt James Turnbull @@ -314,7 +329,9 @@ Julien Maitrehenry Justas Brazauskas Justin Cormack Justin Simonelis +Justyn Temme Jyrki Puttonen +Jérémie Drouet Jérôme Petazzoni Jörg Thalheim Kai Blin @@ -350,6 +367,7 @@ Lai Jiangshan Lars Kellogg-Stedman Laura Frank Laurent Erignoux +Lee Gaines Lei Jitang Lennie Leo Gallucci @@ -357,6 +375,8 @@ Lewis Daly Li Yi Li Yi Liang-Chi Hsieh +Lifubang +Lihua Tang Lily Guo Lin Lu Linus Heckemann @@ -384,18 +404,22 @@ Mansi Nahar mapk0y Marc Bihlmaier Marco Mariani +Marco Vedovati Marcus Martins Marianna Tessel Marius Sturm Mark Oates +Marsh Macy Martin Mosegaard Amdisen Mary Anthony Mason Malone Mateusz Major +Mathieu Champlon Matt Gucci Matt Robenolt Matthew Heon Matthieu Hauglustaine +Mauro Porras P Max Shytikov Maxime Petazzoni Mei ChunTao @@ -425,6 +449,7 @@ Mike MacCana mikelinjie <294893458@qq.com> Mikhail Vasin Milind Chawre +Mindaugas Rukas Misty Stanley-Jones Mohammad Banikazemi Mohammed Aaqib Ansari @@ -445,18 +470,21 @@ Nathan Hsieh Nathan LeClaire Nathan McCauley Neil Peterson +Nico Stapelbroek Nicola Kabar Nicolas Borboën Nicolas De Loof Nikhil Chawla Nikolas Garofil Nikolay Milovanov +Nir Soffer Nishant Totla NIWA Hideyuki Noah Treuhaft O.S. Tezer ohmystack Olle Jonsson +Olli Janatuinen Otto Kekäläinen Ovidio Mallo Pascal Borreli @@ -474,6 +502,7 @@ Per Lundberg Peter Edge Peter Hsu Peter Jaffe +Peter Kehl Peter Nagy Peter Salvatore Peter Waller @@ -511,6 +540,7 @@ Roman Dudin Rory Hunter Ross Boucher Rubens Figueiredo +Rui Cao Ryan Belgrave Ryan Detzel Ryan Stelly @@ -519,8 +549,10 @@ Sakeven Jiang Sally O'Malley Sam Neirinck Sambuddha Basu +Sami Tabet Samuel Karp Santhosh Manohar +Scott Brenner Scott Collier Sean Christopherson Sean Rodman @@ -562,13 +594,15 @@ Sylvain Baubeau Sébastien HOUZÉ T K Sourabh TAGOMORI Satoshi +taiji-tech Taylor Jones +Tejaswini Duggaraju Thatcher Peskens Thomas Gazagnaire Thomas Krzero Thomas Leonard Thomas Léveil -Thomas Riccardi +Thomas Riccardi Thomas Swift Tianon Gravi Tianyi Wang @@ -585,6 +619,8 @@ Tobias Gesellchen Todd Whiteman Tom Denham Tom Fotherby +Tom Klingenberg +Tom Milligan Tom X. 
Tobin Tomas Tomecek Tomasz Kopczynski @@ -603,6 +639,7 @@ Veres Lajos Victor Vieux Victoria Bialas Viktor Stanchev +Vimal Raghubir Vincent Batts Vincent Bernat Vincent Demeester @@ -610,6 +647,7 @@ Vincent Woo Vishnu Kannan Vivek Goyal Wang Jie +Wang Lei Wang Long Wang Ping Wang Xing @@ -622,6 +660,8 @@ Wes Morgan Wewang Xiaorenfine William Henry Xianglin Gao +Xiaodong Zhang +Xiaoxi He Xinbo Weng Xuecong Liao Yan Feng @@ -633,7 +673,9 @@ Yong Tang Yosef Fertel Yu Peng Yuan Sun +Yue Zhang Yunxiang Huang +Zachary Romero zebrilee Zhang Kun Zhang Wei diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go index 9161921a2..2a6c5f0ea 100644 --- a/vendor/github.com/docker/cli/cli/config/config.go +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -8,7 +8,7 @@ import ( "github.com/docker/cli/cli/config/configfile" "github.com/docker/cli/cli/config/credentials" - "github.com/docker/docker/api/types" + "github.com/docker/cli/cli/config/types" "github.com/docker/docker/pkg/homedir" "github.com/pkg/errors" ) @@ -18,6 +18,7 @@ const ( ConfigFileName = "config.json" configFileDir = ".docker" oldConfigfile = ".dockercfg" + contextsDir = "contexts" ) var ( @@ -35,11 +36,21 @@ func Dir() string { return configDir } +// ContextStoreDir returns the directory the docker contexts are stored in +func ContextStoreDir() string { + return filepath.Join(Dir(), contextsDir) +} + // SetDir sets the directory the configuration file is stored in func SetDir(dir string) { configDir = dir } +// Path returns the path to a file relative to the config dir +func Path(p ...string) string { + return filepath.Join(append([]string{Dir()}, p...)...) +} + // LegacyLoadFromReader is a convenience function that creates a ConfigFile object from // a non-nested reader func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go index 7fa9b734b..656184993 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -11,8 +11,7 @@ import ( "strings" "github.com/docker/cli/cli/config/credentials" - "github.com/docker/cli/opts" - "github.com/docker/docker/api/types" + "github.com/docker/cli/cli/config/types" "github.com/pkg/errors" ) @@ -48,6 +47,8 @@ type ConfigFile struct { Experimental string `json:"experimental,omitempty"` StackOrchestrator string `json:"stackOrchestrator,omitempty"` Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` + CurrentContext string `json:"currentContext,omitempty"` + CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` } // ProxyConfig contains proxy configuration settings @@ -194,7 +195,7 @@ func (configFile *ConfigFile) Save() error { // ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and // then checking this against any environment variables provided to the container -func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts []string) map[string]*string { +func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string { var cfgKey string if _, ok := configFile.Proxies[host]; !ok { @@ -210,7 +211,10 @@ func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts []string) ma "NO_PROXY": &config.NoProxy, "FTP_PROXY": &config.FTPProxy, } - m := opts.ConvertKVStringsToMapWithNil(runOpts) 
+ m := runOpts + if m == nil { + m = make(map[string]*string) + } for k := range permitted { if *permitted[k] == "" { continue diff --git a/vendor/github.com/docker/cli/cli/config/credentials/credentials.go b/vendor/github.com/docker/cli/cli/config/credentials/credentials.go index ca874cac5..28d58ec48 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/credentials.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/credentials.go @@ -1,7 +1,7 @@ package credentials import ( - "github.com/docker/docker/api/types" + "github.com/docker/cli/cli/config/types" ) // Store is the interface that any credentials store must implement. diff --git a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go index 6ae681754..e509820b7 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go @@ -1,8 +1,9 @@ package credentials import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/registry" + "strings" + + "github.com/docker/cli/cli/config/types" ) type store interface { @@ -35,7 +36,7 @@ func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { // Maybe they have a legacy config file, we will iterate the keys converting // them to the new format and testing for r, ac := range c.file.GetAuthConfigs() { - if serverAddress == registry.ConvertToHostname(r) { + if serverAddress == ConvertToHostname(r) { return ac, nil } } @@ -62,3 +63,19 @@ func (c *fileStore) GetFilename() string { func (c *fileStore) IsFileStore() bool { return true } + +// ConvertToHostname converts a registry url which has http|https prepended +// to just an hostname. +// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies. +func ConvertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go index ef3aab4ad..afe542cc3 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go @@ -1,9 +1,9 @@ package credentials import ( + "github.com/docker/cli/cli/config/types" "github.com/docker/docker-credential-helpers/client" "github.com/docker/docker-credential-helpers/credentials" - "github.com/docker/docker/api/types" ) const ( diff --git a/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/vendor/github.com/docker/cli/cli/config/types/authconfig.go new file mode 100644 index 000000000..056af6b84 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/types/authconfig.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. 
+ Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/docker/cli/opts/config.go b/vendor/github.com/docker/cli/opts/config.go deleted file mode 100644 index 82fd2bce4..000000000 --- a/vendor/github.com/docker/cli/opts/config.go +++ /dev/null @@ -1,98 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "os" - "strconv" - "strings" - - swarmtypes "github.com/docker/docker/api/types/swarm" -) - -// ConfigOpt is a Value type for parsing configs -type ConfigOpt struct { - values []*swarmtypes.ConfigReference -} - -// Set a new config value -func (o *ConfigOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - options := &swarmtypes.ConfigReference{ - File: &swarmtypes.ConfigReferenceFileTarget{ - UID: "0", - GID: "0", - Mode: 0444, - }, - } - - // support a simple syntax of --config foo - if len(fields) == 1 { - options.File.Name = fields[0] - options.ConfigName = fields[0] - o.values = append(o.values, options) - return nil - } - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "source", "src": - options.ConfigName = value - case "target": - options.File.Name = value - case "uid": - options.File.UID = value - case "gid": - options.File.GID = value - case "mode": - m, err := strconv.ParseUint(value, 0, 32) - if err != nil { - return fmt.Errorf("invalid mode specified: %v", err) - } - - options.File.Mode = os.FileMode(m) - default: - return fmt.Errorf("invalid field in config request: %s", key) - } - } - - if options.ConfigName == "" { - return fmt.Errorf("source is required") - } - - o.values = append(o.values, options) - return nil -} - -// Type returns the type of this option -func (o *ConfigOpt) Type() string { - return "config" -} - -// String returns a string repr of this option -func (o *ConfigOpt) String() string { - configs := []string{} - for _, config := range o.values { - repr := fmt.Sprintf("%s -> %s", config.ConfigName, config.File.Name) - configs = append(configs, repr) - } - return strings.Join(configs, ", ") -} - -// Value returns the config requests -func (o *ConfigOpt) Value() []*swarmtypes.ConfigReference { - return o.values -} diff --git a/vendor/github.com/docker/cli/opts/duration.go b/vendor/github.com/docker/cli/opts/duration.go deleted file mode 100644 index 5dc6eeaa7..000000000 --- a/vendor/github.com/docker/cli/opts/duration.go +++ /dev/null @@ -1,64 +0,0 @@ -package opts - -import ( - "time" - - "github.com/pkg/errors" -) - -// PositiveDurationOpt is an option type for time.Duration that uses a pointer. -// It behave similarly to DurationOpt but only allows positive duration values. -type PositiveDurationOpt struct { - DurationOpt -} - -// Set a new value on the option. Setting a negative duration value will cause -// an error to be returned. 
-func (d *PositiveDurationOpt) Set(s string) error { - err := d.DurationOpt.Set(s) - if err != nil { - return err - } - if *d.DurationOpt.value < 0 { - return errors.Errorf("duration cannot be negative") - } - return nil -} - -// DurationOpt is an option type for time.Duration that uses a pointer. This -// allows us to get nil values outside, instead of defaulting to 0 -type DurationOpt struct { - value *time.Duration -} - -// NewDurationOpt creates a DurationOpt with the specified duration -func NewDurationOpt(value *time.Duration) *DurationOpt { - return &DurationOpt{ - value: value, - } -} - -// Set a new value on the option -func (d *DurationOpt) Set(s string) error { - v, err := time.ParseDuration(s) - d.value = &v - return err -} - -// Type returns the type of this option, which will be displayed in `--help` output -func (d *DurationOpt) Type() string { - return "duration" -} - -// String returns a string repr of this option -func (d *DurationOpt) String() string { - if d.value != nil { - return d.value.String() - } - return "" -} - -// Value returns the time.Duration -func (d *DurationOpt) Value() *time.Duration { - return d.value -} diff --git a/vendor/github.com/docker/cli/opts/env.go b/vendor/github.com/docker/cli/opts/env.go deleted file mode 100644 index e6ddd7330..000000000 --- a/vendor/github.com/docker/cli/opts/env.go +++ /dev/null @@ -1,46 +0,0 @@ -package opts - -import ( - "fmt" - "os" - "runtime" - "strings" -) - -// ValidateEnv validates an environment variable and returns it. -// If no value is specified, it returns the current value using os.Getenv. -// -// As on ParseEnvFile and related to #16585, environment variable names -// are not validate what so ever, it's up to application inside docker -// to validate them or not. -// -// The only validation here is to check if name is empty, per #25099 -func ValidateEnv(val string) (string, error) { - arr := strings.Split(val, "=") - if arr[0] == "" { - return "", fmt.Errorf("invalid environment variable: %s", val) - } - if len(arr) > 1 { - return val, nil - } - if !doesEnvExist(val) { - return val, nil - } - return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil -} - -func doesEnvExist(name string) bool { - for _, entry := range os.Environ() { - parts := strings.SplitN(entry, "=", 2) - if runtime.GOOS == "windows" { - // Environment variable are case-insensitive on Windows. PaTh, path and PATH are equivalent. - if strings.EqualFold(parts[0], name) { - return true - } - } - if parts[0] == name { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/cli/opts/envfile.go b/vendor/github.com/docker/cli/opts/envfile.go deleted file mode 100644 index 69d3ca6f6..000000000 --- a/vendor/github.com/docker/cli/opts/envfile.go +++ /dev/null @@ -1,22 +0,0 @@ -package opts - -import ( - "os" -) - -// ParseEnvFile reads a file with environment variables enumerated by lines -// -// ``Environment variable names used by the utilities in the Shell and -// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase -// letters, digits, and the '_' (underscore) from the characters defined in -// Portable Character Set and do not begin with a digit. 
*But*, other -// characters may be permitted by an implementation; applications shall -// tolerate the presence of such names.'' -// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html -// -// As of #16585, it's up to application inside docker to validate or not -// environment variables, that's why we just strip leading whitespace and -// nothing more. -func ParseEnvFile(filename string) ([]string, error) { - return parseKeyValueFile(filename, os.LookupEnv) -} diff --git a/vendor/github.com/docker/cli/opts/file.go b/vendor/github.com/docker/cli/opts/file.go deleted file mode 100644 index 1721a46ef..000000000 --- a/vendor/github.com/docker/cli/opts/file.go +++ /dev/null @@ -1,77 +0,0 @@ -package opts - -import ( - "bufio" - "bytes" - "fmt" - "os" - "strings" - "unicode" - "unicode/utf8" -) - -var whiteSpaces = " \t" - -// ErrBadKey typed error for bad environment variable -type ErrBadKey struct { - msg string -} - -func (e ErrBadKey) Error() string { - return fmt.Sprintf("poorly formatted environment: %s", e.msg) -} - -func parseKeyValueFile(filename string, emptyFn func(string) (string, bool)) ([]string, error) { - fh, err := os.Open(filename) - if err != nil { - return []string{}, err - } - defer fh.Close() - - lines := []string{} - scanner := bufio.NewScanner(fh) - currentLine := 0 - utf8bom := []byte{0xEF, 0xBB, 0xBF} - for scanner.Scan() { - scannedBytes := scanner.Bytes() - if !utf8.Valid(scannedBytes) { - return []string{}, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes) - } - // We trim UTF8 BOM - if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) - } - // trim the line from all leading whitespace first - line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) - currentLine++ - // line is not empty, and not starting with '#' - if len(line) > 0 && !strings.HasPrefix(line, "#") { - data := strings.SplitN(line, "=", 2) - - // trim the front of a variable, but nothing else - variable := strings.TrimLeft(data[0], whiteSpaces) - if strings.ContainsAny(variable, whiteSpaces) { - return []string{}, ErrBadKey{fmt.Sprintf("variable '%s' has white spaces", variable)} - } - if len(variable) == 0 { - return []string{}, ErrBadKey{fmt.Sprintf("no variable name on line '%s'", line)} - } - - if len(data) > 1 { - // pass the value through, no trimming - lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) - } else { - var value string - var present bool - if emptyFn != nil { - value, present = emptyFn(line) - } - if present { - // if only a pass-through variable is given, clean it up. - lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), value)) - } - } - } - } - return lines, scanner.Err() -} diff --git a/vendor/github.com/docker/cli/opts/hosts.go b/vendor/github.com/docker/cli/opts/hosts.go deleted file mode 100644 index 594cccf2f..000000000 --- a/vendor/github.com/docker/cli/opts/hosts.go +++ /dev/null @@ -1,165 +0,0 @@ -package opts - -import ( - "fmt" - "net" - "net/url" - "strconv" - "strings" -) - -var ( - // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. 
dockerd -H tcp:// - // These are the IANA registered port numbers for use with Docker - // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker - DefaultHTTPPort = 2375 // Default HTTP Port - // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled - DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port - // DefaultUnixSocket Path for the unix socket. - // Docker daemon by default always listens on the default unix socket - DefaultUnixSocket = "/var/run/docker.sock" - // DefaultTCPHost constant defines the default host string used by docker on Windows - DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) - // DefaultTLSHost constant defines the default host string used by docker for TLS sockets - DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) - // DefaultNamedPipe defines the default named pipe used by docker on Windows - DefaultNamedPipe = `//./pipe/docker_engine` -) - -// ValidateHost validates that the specified string is a valid host and returns it. -func ValidateHost(val string) (string, error) { - host := strings.TrimSpace(val) - // The empty string means default and is not handled by parseDockerDaemonHost - if host != "" { - _, err := parseDockerDaemonHost(host) - if err != nil { - return val, err - } - } - // Note: unlike most flag validators, we don't return the mutated value here - // we need to know what the user entered later (using ParseHost) to adjust for TLS - return val, nil -} - -// ParseHost and set defaults for a Daemon host string -func ParseHost(defaultToTLS bool, val string) (string, error) { - host := strings.TrimSpace(val) - if host == "" { - if defaultToTLS { - host = DefaultTLSHost - } else { - host = DefaultHost - } - } else { - var err error - host, err = parseDockerDaemonHost(host) - if err != nil { - return val, err - } - } - return host, nil -} - -// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. -// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. -func parseDockerDaemonHost(addr string) (string, error) { - addrParts := strings.SplitN(addr, "://", 2) - if len(addrParts) == 1 && addrParts[0] != "" { - addrParts = []string{"tcp", addrParts[0]} - } - - switch addrParts[0] { - case "tcp": - return ParseTCPAddr(addrParts[1], DefaultTCPHost) - case "unix": - return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) - case "npipe": - return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) - case "fd": - return addr, nil - default: - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } -} - -// parseSimpleProtoAddr parses and validates that the specified address is a valid -// socket address for simple protocols like unix and npipe. It returns a formatted -// socket address, either using the address parsed from addr, or the contents of -// defaultAddr if addr is a blank string. -func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, proto+"://") - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) - } - if addr == "" { - addr = defaultAddr - } - return fmt.Sprintf("%s://%s", proto, addr), nil -} - -// ParseTCPAddr parses and validates that the specified address is a valid TCP -// address. 
It returns a formatted TCP address, either using the address parsed -// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. -// tryAddr is expected to have already been Trim()'d -// defaultAddr must be in the full `tcp://host:port` form -func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { - if tryAddr == "" || tryAddr == "tcp://" { - return defaultAddr, nil - } - addr := strings.TrimPrefix(tryAddr, "tcp://") - if strings.Contains(addr, "://") || addr == "" { - return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) - } - - defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") - defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) - if err != nil { - return "", err - } - // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but - // not 1.4. See https://github.com/golang/go/issues/12200 and - // https://github.com/golang/go/issues/6530. - if strings.HasSuffix(addr, "]:") { - addr += defaultPort - } - - u, err := url.Parse("tcp://" + addr) - if err != nil { - return "", err - } - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - // try port addition once - host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) - } - if err != nil { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - if host == "" { - host = defaultHost - } - if port == "" { - port = defaultPort - } - p, err := strconv.Atoi(port) - if err != nil && p == 0 { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil -} - -// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. -// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). -func ValidateExtraHost(val string) (string, error) { - // allow for IPv6 addresses in extra hosts by only splitting on first ":" - arr := strings.SplitN(val, ":", 2) - if len(arr) != 2 || len(arr[0]) == 0 { - return "", fmt.Errorf("bad format for add-host: %q", val) - } - if _, err := ValidateIPAddress(arr[1]); err != nil { - return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) - } - return val, nil -} diff --git a/vendor/github.com/docker/cli/opts/hosts_unix.go b/vendor/github.com/docker/cli/opts/hosts_unix.go deleted file mode 100644 index 611407a9d..000000000 --- a/vendor/github.com/docker/cli/opts/hosts_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package opts - -import "fmt" - -// DefaultHost constant defines the default host string used by docker on other hosts than Windows -var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/vendor/github.com/docker/cli/opts/hosts_windows.go b/vendor/github.com/docker/cli/opts/hosts_windows.go deleted file mode 100644 index 7c239e00f..000000000 --- a/vendor/github.com/docker/cli/opts/hosts_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build windows - -package opts - -// DefaultHost constant defines the default host string used by docker on Windows -var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/vendor/github.com/docker/cli/opts/ip.go b/vendor/github.com/docker/cli/opts/ip.go deleted file mode 100644 index fb03b5011..000000000 --- a/vendor/github.com/docker/cli/opts/ip.go +++ /dev/null @@ -1,47 +0,0 @@ -package opts - -import ( - "fmt" - "net" -) - -// IPOpt holds an IP. It is used to store values from CLI flags. 
-type IPOpt struct { - *net.IP -} - -// NewIPOpt creates a new IPOpt from a reference net.IP and a -// string representation of an IP. If the string is not a valid -// IP it will fallback to the specified reference. -func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { - o := &IPOpt{ - IP: ref, - } - o.Set(defaultVal) - return o -} - -// Set sets an IPv4 or IPv6 address from a given string. If the given -// string is not parseable as an IP address it returns an error. -func (o *IPOpt) Set(val string) error { - ip := net.ParseIP(val) - if ip == nil { - return fmt.Errorf("%s is not an ip address", val) - } - *o.IP = ip - return nil -} - -// String returns the IP address stored in the IPOpt. If stored IP is a -// nil pointer, it returns an empty string. -func (o *IPOpt) String() string { - if *o.IP == nil { - return "" - } - return o.IP.String() -} - -// Type returns the type of the option -func (o *IPOpt) Type() string { - return "ip" -} diff --git a/vendor/github.com/docker/cli/opts/mount.go b/vendor/github.com/docker/cli/opts/mount.go deleted file mode 100644 index 3aa984942..000000000 --- a/vendor/github.com/docker/cli/opts/mount.go +++ /dev/null @@ -1,174 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "os" - "strconv" - "strings" - - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/go-units" -) - -// MountOpt is a Value type for parsing mounts -type MountOpt struct { - values []mounttypes.Mount -} - -// Set a new mount value -// nolint: gocyclo -func (m *MountOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - mount := mounttypes.Mount{} - - volumeOptions := func() *mounttypes.VolumeOptions { - if mount.VolumeOptions == nil { - mount.VolumeOptions = &mounttypes.VolumeOptions{ - Labels: make(map[string]string), - } - } - if mount.VolumeOptions.DriverConfig == nil { - mount.VolumeOptions.DriverConfig = &mounttypes.Driver{} - } - return mount.VolumeOptions - } - - bindOptions := func() *mounttypes.BindOptions { - if mount.BindOptions == nil { - mount.BindOptions = new(mounttypes.BindOptions) - } - return mount.BindOptions - } - - tmpfsOptions := func() *mounttypes.TmpfsOptions { - if mount.TmpfsOptions == nil { - mount.TmpfsOptions = new(mounttypes.TmpfsOptions) - } - return mount.TmpfsOptions - } - - setValueOnMap := func(target map[string]string, value string) { - parts := strings.SplitN(value, "=", 2) - if len(parts) == 1 { - target[value] = "" - } else { - target[parts[0]] = parts[1] - } - } - - mount.Type = mounttypes.TypeVolume // default to volume mounts - // Set writable as the default - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) == 1 { - switch key { - case "readonly", "ro": - mount.ReadOnly = true - continue - case "volume-nocopy": - volumeOptions().NoCopy = true - continue - } - } - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "type": - mount.Type = mounttypes.Type(strings.ToLower(value)) - case "source", "src": - mount.Source = value - case "target", "dst", "destination": - mount.Target = value - case "readonly", "ro": - mount.ReadOnly, err = strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - case "consistency": - mount.Consistency = mounttypes.Consistency(strings.ToLower(value)) - case 
"bind-propagation": - bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value)) - case "volume-nocopy": - volumeOptions().NoCopy, err = strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for volume-nocopy: %s", value) - } - case "volume-label": - setValueOnMap(volumeOptions().Labels, value) - case "volume-driver": - volumeOptions().DriverConfig.Name = value - case "volume-opt": - if volumeOptions().DriverConfig.Options == nil { - volumeOptions().DriverConfig.Options = make(map[string]string) - } - setValueOnMap(volumeOptions().DriverConfig.Options, value) - case "tmpfs-size": - sizeBytes, err := units.RAMInBytes(value) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - tmpfsOptions().SizeBytes = sizeBytes - case "tmpfs-mode": - ui64, err := strconv.ParseUint(value, 8, 32) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - tmpfsOptions().Mode = os.FileMode(ui64) - default: - return fmt.Errorf("unexpected key '%s' in '%s'", key, field) - } - } - - if mount.Type == "" { - return fmt.Errorf("type is required") - } - - if mount.Target == "" { - return fmt.Errorf("target is required") - } - - if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume { - return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type) - } - if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind { - return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type) - } - if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs { - return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type) - } - - m.values = append(m.values, mount) - return nil -} - -// Type returns the type of this option -func (m *MountOpt) Type() string { - return "mount" -} - -// String returns a string repr of this option -func (m *MountOpt) String() string { - mounts := []string{} - for _, mount := range m.values { - repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target) - mounts = append(mounts, repr) - } - return strings.Join(mounts, ", ") -} - -// Value returns the mounts -func (m *MountOpt) Value() []mounttypes.Mount { - return m.values -} diff --git a/vendor/github.com/docker/cli/opts/network.go b/vendor/github.com/docker/cli/opts/network.go deleted file mode 100644 index ec4967ff3..000000000 --- a/vendor/github.com/docker/cli/opts/network.go +++ /dev/null @@ -1,106 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "regexp" - "strings" -) - -const ( - networkOptName = "name" - networkOptAlias = "alias" - driverOpt = "driver-opt" -) - -// NetworkAttachmentOpts represents the network options for endpoint creation -type NetworkAttachmentOpts struct { - Target string - Aliases []string - DriverOpts map[string]string -} - -// NetworkOpt represents a network config in swarm mode. 
-type NetworkOpt struct { - options []NetworkAttachmentOpts -} - -// Set networkopts value -func (n *NetworkOpt) Set(value string) error { - longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value) - if err != nil { - return err - } - - var netOpt NetworkAttachmentOpts - if longSyntax { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - netOpt.Aliases = []string{} - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - - if len(parts) < 2 { - return fmt.Errorf("invalid field %s", field) - } - - key := strings.TrimSpace(strings.ToLower(parts[0])) - value := strings.TrimSpace(strings.ToLower(parts[1])) - - switch key { - case networkOptName: - netOpt.Target = value - case networkOptAlias: - netOpt.Aliases = append(netOpt.Aliases, value) - case driverOpt: - key, value, err = parseDriverOpt(value) - if err == nil { - if netOpt.DriverOpts == nil { - netOpt.DriverOpts = make(map[string]string) - } - netOpt.DriverOpts[key] = value - } else { - return err - } - default: - return fmt.Errorf("invalid field key %s", key) - } - } - if len(netOpt.Target) == 0 { - return fmt.Errorf("network name/id is not specified") - } - } else { - netOpt.Target = value - } - n.options = append(n.options, netOpt) - return nil -} - -// Type returns the type of this option -func (n *NetworkOpt) Type() string { - return "network" -} - -// Value returns the networkopts -func (n *NetworkOpt) Value() []NetworkAttachmentOpts { - return n.options -} - -// String returns the network opts as a string -func (n *NetworkOpt) String() string { - return "" -} - -func parseDriverOpt(driverOpt string) (string, string, error) { - parts := strings.SplitN(driverOpt, "=", 2) - if len(parts) != 2 { - return "", "", fmt.Errorf("invalid key value pair format in driver options") - } - key := strings.TrimSpace(strings.ToLower(parts[0])) - value := strings.TrimSpace(strings.ToLower(parts[1])) - return key, value, nil -} diff --git a/vendor/github.com/docker/cli/opts/opts.go b/vendor/github.com/docker/cli/opts/opts.go deleted file mode 100644 index 6485e309e..000000000 --- a/vendor/github.com/docker/cli/opts/opts.go +++ /dev/null @@ -1,509 +0,0 @@ -package opts - -import ( - "fmt" - "math/big" - "net" - "path" - "regexp" - "strings" - - "github.com/docker/docker/api/types/filters" - units "github.com/docker/go-units" - "github.com/pkg/errors" -) - -var ( - alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) - domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) -) - -// ListOpts holds a list of values and a validation function. -type ListOpts struct { - values *[]string - validator ValidatorFctType -} - -// NewListOpts creates a new ListOpts with the specified validator. -func NewListOpts(validator ValidatorFctType) ListOpts { - var values []string - return *NewListOptsRef(&values, validator) -} - -// NewListOptsRef creates a new ListOpts with the specified values and validator. -func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { - return &ListOpts{ - values: values, - validator: validator, - } -} - -func (opts *ListOpts) String() string { - if len(*opts.values) == 0 { - return "" - } - return fmt.Sprintf("%v", *opts.values) -} - -// Set validates if needed the input value and adds it to the -// internal slice. 
-func (opts *ListOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - (*opts.values) = append((*opts.values), value) - return nil -} - -// Delete removes the specified element from the slice. -func (opts *ListOpts) Delete(key string) { - for i, k := range *opts.values { - if k == key { - (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) - return - } - } -} - -// GetMap returns the content of values in a map in order to avoid -// duplicates. -func (opts *ListOpts) GetMap() map[string]struct{} { - ret := make(map[string]struct{}) - for _, k := range *opts.values { - ret[k] = struct{}{} - } - return ret -} - -// GetAll returns the values of slice. -func (opts *ListOpts) GetAll() []string { - return (*opts.values) -} - -// GetAllOrEmpty returns the values of the slice -// or an empty slice when there are no values. -func (opts *ListOpts) GetAllOrEmpty() []string { - v := *opts.values - if v == nil { - return make([]string, 0) - } - return v -} - -// Get checks the existence of the specified key. -func (opts *ListOpts) Get(key string) bool { - for _, k := range *opts.values { - if k == key { - return true - } - } - return false -} - -// Len returns the amount of element in the slice. -func (opts *ListOpts) Len() int { - return len((*opts.values)) -} - -// Type returns a string name for this Option type -func (opts *ListOpts) Type() string { - return "list" -} - -// WithValidator returns the ListOpts with validator set. -func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts { - opts.validator = validator - return opts -} - -// NamedOption is an interface that list and map options -// with names implement. -type NamedOption interface { - Name() string -} - -// NamedListOpts is a ListOpts with a configuration name. -// This struct is useful to keep reference to the assigned -// field name in the internal configuration struct. -type NamedListOpts struct { - name string - ListOpts -} - -var _ NamedOption = &NamedListOpts{} - -// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. -func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { - return &NamedListOpts{ - name: name, - ListOpts: *NewListOptsRef(values, validator), - } -} - -// Name returns the name of the NamedListOpts in the configuration. -func (o *NamedListOpts) Name() string { - return o.name -} - -// MapOpts holds a map of values and a validation function. -type MapOpts struct { - values map[string]string - validator ValidatorFctType -} - -// Set validates if needed the input value and add it to the -// internal map, by splitting on '='. -func (opts *MapOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - vals := strings.SplitN(value, "=", 2) - if len(vals) == 1 { - (opts.values)[vals[0]] = "" - } else { - (opts.values)[vals[0]] = vals[1] - } - return nil -} - -// GetAll returns the values of MapOpts as a map. -func (opts *MapOpts) GetAll() map[string]string { - return opts.values -} - -func (opts *MapOpts) String() string { - return fmt.Sprintf("%v", opts.values) -} - -// Type returns a string name for this Option type -func (opts *MapOpts) Type() string { - return "map" -} - -// NewMapOpts creates a new MapOpts with the specified map of values and a validator. 
-func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { - if values == nil { - values = make(map[string]string) - } - return &MapOpts{ - values: values, - validator: validator, - } -} - -// NamedMapOpts is a MapOpts struct with a configuration name. -// This struct is useful to keep reference to the assigned -// field name in the internal configuration struct. -type NamedMapOpts struct { - name string - MapOpts -} - -var _ NamedOption = &NamedMapOpts{} - -// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. -func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { - return &NamedMapOpts{ - name: name, - MapOpts: *NewMapOpts(values, validator), - } -} - -// Name returns the name of the NamedMapOpts in the configuration. -func (o *NamedMapOpts) Name() string { - return o.name -} - -// ValidatorFctType defines a validator function that returns a validated string and/or an error. -type ValidatorFctType func(val string) (string, error) - -// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error -type ValidatorFctListType func(val string) ([]string, error) - -// ValidateIPAddress validates an Ip address. -func ValidateIPAddress(val string) (string, error) { - var ip = net.ParseIP(strings.TrimSpace(val)) - if ip != nil { - return ip.String(), nil - } - return "", fmt.Errorf("%s is not an ip address", val) -} - -// ValidateMACAddress validates a MAC address. -func ValidateMACAddress(val string) (string, error) { - _, err := net.ParseMAC(strings.TrimSpace(val)) - if err != nil { - return "", err - } - return val, nil -} - -// ValidateDNSSearch validates domain for resolvconf search configuration. -// A zero length domain is represented by a dot (.). -func ValidateDNSSearch(val string) (string, error) { - if val = strings.Trim(val, " "); val == "." { - return val, nil - } - return validateDomain(val) -} - -func validateDomain(val string) (string, error) { - if alphaRegexp.FindString(val) == "" { - return "", fmt.Errorf("%s is not a valid domain", val) - } - ns := domainRegexp.FindSubmatch([]byte(val)) - if len(ns) > 0 && len(ns[1]) < 255 { - return string(ns[1]), nil - } - return "", fmt.Errorf("%s is not a valid domain", val) -} - -// ValidateLabel validates that the specified string is a valid label, and returns it. -// Labels are in the form on key=value. -func ValidateLabel(val string) (string, error) { - if strings.Count(val, "=") < 1 { - return "", fmt.Errorf("bad attribute format: %s", val) - } - return val, nil -} - -// ValidateSysctl validates a sysctl and returns it. 
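The ListOpts plumbing removed in this hunk is the usual pflag-style Value used across the docker CLI: a validator such as ValidateIPAddress (defined in the same file above) runs on every Set call before the value is appended. A minimal, illustrative sketch, assuming the docker/cli opts package whose vendored copy is dropped here:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	// NewListOpts wires a validator into the list; Set rejects values the
	// validator refuses and appends the rest.
	dns := opts.NewListOpts(opts.ValidateIPAddress)
	if err := dns.Set("8.8.8.8"); err != nil {
		panic(err)
	}
	if err := dns.Set("not-an-ip"); err != nil {
		fmt.Println(err) // not-an-ip is not an ip address
	}
	fmt.Println(dns.GetAll()) // [8.8.8.8]
}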
-func ValidateSysctl(val string) (string, error) { - validSysctlMap := map[string]bool{ - "kernel.msgmax": true, - "kernel.msgmnb": true, - "kernel.msgmni": true, - "kernel.sem": true, - "kernel.shmall": true, - "kernel.shmmax": true, - "kernel.shmmni": true, - "kernel.shm_rmid_forced": true, - } - validSysctlPrefixes := []string{ - "net.", - "fs.mqueue.", - } - arr := strings.Split(val, "=") - if len(arr) < 2 { - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) - } - if validSysctlMap[arr[0]] { - return val, nil - } - - for _, vp := range validSysctlPrefixes { - if strings.HasPrefix(arr[0], vp) { - return val, nil - } - } - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) -} - -// ValidateProgressOutput errors out if an invalid value is passed to --progress -func ValidateProgressOutput(val string) error { - valid := []string{"auto", "plain", "tty"} - for _, s := range valid { - if s == val { - return nil - } - } - return fmt.Errorf("invalid value %q passed to --progress, valid values are: %s", val, strings.Join(valid, ", ")) -} - -// FilterOpt is a flag type for validating filters -type FilterOpt struct { - filter filters.Args -} - -// NewFilterOpt returns a new FilterOpt -func NewFilterOpt() FilterOpt { - return FilterOpt{filter: filters.NewArgs()} -} - -func (o *FilterOpt) String() string { - repr, err := filters.ToJSON(o.filter) - if err != nil { - return "invalid filters" - } - return repr -} - -// Set sets the value of the opt by parsing the command line value -func (o *FilterOpt) Set(value string) error { - if value == "" { - return nil - } - if !strings.Contains(value, "=") { - return errors.New("bad format of filter (expected name=value)") - } - f := strings.SplitN(value, "=", 2) - name := strings.ToLower(strings.TrimSpace(f[0])) - value = strings.TrimSpace(f[1]) - - o.filter.Add(name, value) - return nil -} - -// Type returns the option type -func (o *FilterOpt) Type() string { - return "filter" -} - -// Value returns the value of this option -func (o *FilterOpt) Value() filters.Args { - return o.filter -} - -// NanoCPUs is a type for fixed point fractional number. 
-type NanoCPUs int64 - -// String returns the string format of the number -func (c *NanoCPUs) String() string { - if *c == 0 { - return "" - } - return big.NewRat(c.Value(), 1e9).FloatString(3) -} - -// Set sets the value of the NanoCPU by passing a string -func (c *NanoCPUs) Set(value string) error { - cpus, err := ParseCPUs(value) - *c = NanoCPUs(cpus) - return err -} - -// Type returns the type -func (c *NanoCPUs) Type() string { - return "decimal" -} - -// Value returns the value in int64 -func (c *NanoCPUs) Value() int64 { - return int64(*c) -} - -// ParseCPUs takes a string ratio and returns an integer value of nano cpus -func ParseCPUs(value string) (int64, error) { - cpu, ok := new(big.Rat).SetString(value) - if !ok { - return 0, fmt.Errorf("failed to parse %v as a rational number", value) - } - nano := cpu.Mul(cpu, big.NewRat(1e9, 1)) - if !nano.IsInt() { - return 0, fmt.Errorf("value is too precise") - } - return nano.Num().Int64(), nil -} - -// ParseLink parses and validates the specified string as a link format (name:alias) -func ParseLink(val string) (string, string, error) { - if val == "" { - return "", "", fmt.Errorf("empty string specified for links") - } - arr := strings.Split(val, ":") - if len(arr) > 2 { - return "", "", fmt.Errorf("bad format for links: %s", val) - } - if len(arr) == 1 { - return val, val, nil - } - // This is kept because we can actually get a HostConfig with links - // from an already created container and the format is not `foo:bar` - // but `/foo:/c1/bar` - if strings.HasPrefix(arr[0], "/") { - _, alias := path.Split(arr[1]) - return arr[0][1:], alias, nil - } - return arr[0], arr[1], nil -} - -// ValidateLink validates that the specified string has a valid link format (containerName:alias). -func ValidateLink(val string) (string, error) { - _, _, err := ParseLink(val) - return val, err -} - -// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc) -type MemBytes int64 - -// String returns the string format of the human readable memory bytes -func (m *MemBytes) String() string { - // NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not. - // We return "0" in case value is 0 here so that the default value is hidden. - // (Sometimes "default 0 B" is actually misleading) - if m.Value() != 0 { - return units.BytesSize(float64(m.Value())) - } - return "0" -} - -// Set sets the value of the MemBytes by passing a string -func (m *MemBytes) Set(value string) error { - val, err := units.RAMInBytes(value) - *m = MemBytes(val) - return err -} - -// Type returns the type -func (m *MemBytes) Type() string { - return "bytes" -} - -// Value returns the value in int64 -func (m *MemBytes) Value() int64 { - return int64(*m) -} - -// UnmarshalJSON is the customized unmarshaler for MemBytes -func (m *MemBytes) UnmarshalJSON(s []byte) error { - if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' { - return fmt.Errorf("invalid size: %q", s) - } - val, err := units.RAMInBytes(string(s[1 : len(s)-1])) - *m = MemBytes(val) - return err -} - -// MemSwapBytes is a type for human readable memory bytes (like 128M, 2g, etc). -// It differs from MemBytes in that -1 is valid and the default. 
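The NanoCPUs and MemBytes options above both reduce a human-friendly flag value to an int64. A minimal sketch of the underlying conversions, using only math/big and the vendored github.com/docker/go-units package that MemBytes.Set already calls:

package main

import (
	"fmt"
	"math/big"

	units "github.com/docker/go-units"
)

func main() {
	// "1.5" CPUs -> 1500000000 nano-CPUs, mirroring ParseCPUs above.
	cpu, _ := new(big.Rat).SetString("1.5")
	nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
	fmt.Println(nano.Num().Int64()) // 1500000000

	// "2g" -> 2147483648 bytes (binary units), mirroring MemBytes.Set above.
	b, _ := units.RAMInBytes("2g")
	fmt.Println(b) // 2147483648
}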
-type MemSwapBytes int64 - -// Set sets the value of the MemSwapBytes by passing a string -func (m *MemSwapBytes) Set(value string) error { - if value == "-1" { - *m = MemSwapBytes(-1) - return nil - } - val, err := units.RAMInBytes(value) - *m = MemSwapBytes(val) - return err -} - -// Type returns the type -func (m *MemSwapBytes) Type() string { - return "bytes" -} - -// Value returns the value in int64 -func (m *MemSwapBytes) Value() int64 { - return int64(*m) -} - -func (m *MemSwapBytes) String() string { - b := MemBytes(*m) - return b.String() -} - -// UnmarshalJSON is the customized unmarshaler for MemSwapBytes -func (m *MemSwapBytes) UnmarshalJSON(s []byte) error { - b := MemBytes(*m) - return b.UnmarshalJSON(s) -} diff --git a/vendor/github.com/docker/cli/opts/opts_unix.go b/vendor/github.com/docker/cli/opts/opts_unix.go deleted file mode 100644 index 2766a43a0..000000000 --- a/vendor/github.com/docker/cli/opts/opts_unix.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !windows - -package opts - -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 -const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/docker/cli/opts/opts_windows.go b/vendor/github.com/docker/cli/opts/opts_windows.go deleted file mode 100644 index 98b7251a9..000000000 --- a/vendor/github.com/docker/cli/opts/opts_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -package opts - -// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. -// @jhowardmsft, @swernli. -// -// On Windows, this mitigates a problem with the default options of running -// a docker client against a local docker daemon on TP5. -// -// What was found that if the default host is "localhost", even if the client -// (and daemon as this is local) is not physically on a network, and the DNS -// cache is flushed (ipconfig /flushdns), then the client will pause for -// exactly one second when connecting to the daemon for calls. For example -// using docker run windowsservercore cmd, the CLI will send a create followed -// by an attach. You see the delay between the attach finishing and the attach -// being seen by the daemon. -// -// Here's some daemon debug logs with additional debug spew put in. The -// AfterWriteJSON log is the very last thing the daemon does as part of the -// create call. The POST /attach is the second CLI call. Notice the second -// time gap. -// -// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" -// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" -// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." -// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... -// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." -// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." -// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" -// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" -// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" -// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" -// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" -// ... 1 second gap here.... 
-// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" -// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" -// -// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change -// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, -// the Windows networking stack is supposed to resolve "localhost" internally, -// without hitting DNS, or even reading the hosts file (which is why localhost -// is commented out in the hosts file on Windows). -// -// We have validated that working around this using the actual IPv4 localhost -// address does not cause the delay. -// -// This does not occur with the docker client built with 1.4.3 on the same -// Windows build, regardless of whether the daemon is built using 1.5.1 -// or 1.4.3. It does not occur on Linux. We also verified we see the same thing -// on a cross-compiled Windows binary (from Linux). -// -// Final note: This is a mitigation, not a 'real' fix. It is still susceptible -// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' -// explicitly. - -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 -const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/docker/cli/opts/parse.go b/vendor/github.com/docker/cli/opts/parse.go deleted file mode 100644 index 70b60e142..000000000 --- a/vendor/github.com/docker/cli/opts/parse.go +++ /dev/null @@ -1,99 +0,0 @@ -package opts - -import ( - "fmt" - "os" - "strconv" - "strings" - - "github.com/docker/docker/api/types/container" -) - -// ReadKVStrings reads a file of line terminated key=value pairs, and overrides any keys -// present in the file with additional pairs specified in the override parameter -func ReadKVStrings(files []string, override []string) ([]string, error) { - return readKVStrings(files, override, nil) -} - -// ReadKVEnvStrings reads a file of line terminated key=value pairs, and overrides any keys -// present in the file with additional pairs specified in the override parameter. -// If a key has no value, it will get the value from the environment. -func ReadKVEnvStrings(files []string, override []string) ([]string, error) { - return readKVStrings(files, override, os.LookupEnv) -} - -func readKVStrings(files []string, override []string, emptyFn func(string) (string, bool)) ([]string, error) { - variables := []string{} - for _, ef := range files { - parsedVars, err := parseKeyValueFile(ef, emptyFn) - if err != nil { - return nil, err - } - variables = append(variables, parsedVars...) - } - // parse the '-e' and '--env' after, to allow override - variables = append(variables, override...) - - return variables, nil -} - -// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} -func ConvertKVStringsToMap(values []string) map[string]string { - result := make(map[string]string, len(values)) - for _, value := range values { - kv := strings.SplitN(value, "=", 2) - if len(kv) == 1 { - result[kv[0]] = "" - } else { - result[kv[0]] = kv[1] - } - } - - return result -} - -// ConvertKVStringsToMapWithNil converts ["key=value"] to {"key":"value"} -// but set unset keys to nil - meaning the ones with no "=" in them. 
-// We use this in cases where we need to distinguish between -// FOO= and FOO -// where the latter case just means FOO was mentioned but not given a value -func ConvertKVStringsToMapWithNil(values []string) map[string]*string { - result := make(map[string]*string, len(values)) - for _, value := range values { - kv := strings.SplitN(value, "=", 2) - if len(kv) == 1 { - result[kv[0]] = nil - } else { - result[kv[0]] = &kv[1] - } - } - - return result -} - -// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect -func ParseRestartPolicy(policy string) (container.RestartPolicy, error) { - p := container.RestartPolicy{} - - if policy == "" { - return p, nil - } - - parts := strings.Split(policy, ":") - - if len(parts) > 2 { - return p, fmt.Errorf("invalid restart policy format") - } - if len(parts) == 2 { - count, err := strconv.Atoi(parts[1]) - if err != nil { - return p, fmt.Errorf("maximum retry count must be an integer") - } - - p.MaximumRetryCount = count - } - - p.Name = parts[0] - - return p, nil -} diff --git a/vendor/github.com/docker/cli/opts/port.go b/vendor/github.com/docker/cli/opts/port.go deleted file mode 100644 index a4a91b1d5..000000000 --- a/vendor/github.com/docker/cli/opts/port.go +++ /dev/null @@ -1,172 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/go-connections/nat" - "github.com/sirupsen/logrus" -) - -const ( - portOptTargetPort = "target" - portOptPublishedPort = "published" - portOptProtocol = "protocol" - portOptMode = "mode" -) - -// PortOpt represents a port config in swarm mode. -type PortOpt struct { - ports []swarm.PortConfig -} - -// Set a new port value -// nolint: gocyclo -func (p *PortOpt) Set(value string) error { - longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value) - if err != nil { - return err - } - if longSyntax { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - pConfig := swarm.PortConfig{} - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid field %s", field) - } - - key := strings.ToLower(parts[0]) - value := strings.ToLower(parts[1]) - - switch key { - case portOptProtocol: - if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) && value != string(swarm.PortConfigProtocolSCTP) { - return fmt.Errorf("invalid protocol value %s", value) - } - - pConfig.Protocol = swarm.PortConfigProtocol(value) - case portOptMode: - if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) { - return fmt.Errorf("invalid publish mode value %s", value) - } - - pConfig.PublishMode = swarm.PortConfigPublishMode(value) - case portOptTargetPort: - tPort, err := strconv.ParseUint(value, 10, 16) - if err != nil { - return err - } - - pConfig.TargetPort = uint32(tPort) - case portOptPublishedPort: - pPort, err := strconv.ParseUint(value, 10, 16) - if err != nil { - return err - } - - pConfig.PublishedPort = uint32(pPort) - default: - return fmt.Errorf("invalid field key %s", key) - } - } - - if pConfig.TargetPort == 0 { - return fmt.Errorf("missing mandatory field %q", portOptTargetPort) - } - - if pConfig.PublishMode == "" { - pConfig.PublishMode = swarm.PortConfigPublishModeIngress - } - - if pConfig.Protocol == "" { - pConfig.Protocol = swarm.PortConfigProtocolTCP - } - - 
p.ports = append(p.ports, pConfig) - } else { - // short syntax - portConfigs := []swarm.PortConfig{} - ports, portBindingMap, err := nat.ParsePortSpecs([]string{value}) - if err != nil { - return err - } - for _, portBindings := range portBindingMap { - for _, portBinding := range portBindings { - if portBinding.HostIP != "" { - return fmt.Errorf("hostip is not supported") - } - } - } - - for port := range ports { - portConfig, err := ConvertPortToPortConfig(port, portBindingMap) - if err != nil { - return err - } - portConfigs = append(portConfigs, portConfig...) - } - p.ports = append(p.ports, portConfigs...) - } - return nil -} - -// Type returns the type of this option -func (p *PortOpt) Type() string { - return "port" -} - -// String returns a string repr of this option -func (p *PortOpt) String() string { - ports := []string{} - for _, port := range p.ports { - repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode) - ports = append(ports, repr) - } - return strings.Join(ports, ", ") -} - -// Value returns the ports -func (p *PortOpt) Value() []swarm.PortConfig { - return p.ports -} - -// ConvertPortToPortConfig converts ports to the swarm type -func ConvertPortToPortConfig( - port nat.Port, - portBindings map[nat.Port][]nat.PortBinding, -) ([]swarm.PortConfig, error) { - ports := []swarm.PortConfig{} - - for _, binding := range portBindings[port] { - if binding.HostIP != "" && binding.HostIP != "0.0.0.0" { - logrus.Warnf("ignoring IP-address (%s:%s:%s) service will listen on '0.0.0.0'", binding.HostIP, binding.HostPort, port) - } - - startHostPort, endHostPort, err := nat.ParsePortRange(binding.HostPort) - - if err != nil && binding.HostPort != "" { - return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port()) - } - - for i := startHostPort; i <= endHostPort; i++ { - ports = append(ports, swarm.PortConfig{ - //TODO Name: ? - Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), - TargetPort: uint32(port.Int()), - PublishedPort: uint32(i), - PublishMode: swarm.PortConfigPublishModeIngress, - }) - } - } - return ports, nil -} diff --git a/vendor/github.com/docker/cli/opts/quotedstring.go b/vendor/github.com/docker/cli/opts/quotedstring.go deleted file mode 100644 index 09c68a526..000000000 --- a/vendor/github.com/docker/cli/opts/quotedstring.go +++ /dev/null @@ -1,37 +0,0 @@ -package opts - -// QuotedString is a string that may have extra quotes around the value. The -// quotes are stripped from the value. 
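PortOpt.Set above accepts two forms: the CSV long syntax built from the target/published/protocol/mode keys, and the classic short -p syntax, which is expanded through nat.ParsePortSpecs exactly as in the else branch. A minimal sketch of the short-syntax expansion, assuming the vendored github.com/docker/go-connections/nat package:

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// "8080:80/tcp" publishes container port 80 on host port 8080.
	ports, bindings, err := nat.ParsePortSpecs([]string{"8080:80/tcp"})
	if err != nil {
		panic(err)
	}
	for port := range ports {
		fmt.Println(port.Proto(), port.Int(), bindings[port][0].HostPort) // tcp 80 8080
	}
}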
-type QuotedString struct { - value *string -} - -// Set sets a new value -func (s *QuotedString) Set(val string) error { - *s.value = trimQuotes(val) - return nil -} - -// Type returns the type of the value -func (s *QuotedString) Type() string { - return "string" -} - -func (s *QuotedString) String() string { - return *s.value -} - -func trimQuotes(value string) string { - lastIndex := len(value) - 1 - for _, char := range []byte{'\'', '"'} { - if value[0] == char && value[lastIndex] == char { - return value[1:lastIndex] - } - } - return value -} - -// NewQuotedString returns a new quoted string option -func NewQuotedString(value *string) *QuotedString { - return &QuotedString{value: value} -} diff --git a/vendor/github.com/docker/cli/opts/runtime.go b/vendor/github.com/docker/cli/opts/runtime.go deleted file mode 100644 index 4361b3ce0..000000000 --- a/vendor/github.com/docker/cli/opts/runtime.go +++ /dev/null @@ -1,79 +0,0 @@ -package opts - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" -) - -// RuntimeOpt defines a map of Runtimes -type RuntimeOpt struct { - name string - stockRuntimeName string - values *map[string]types.Runtime -} - -// NewNamedRuntimeOpt creates a new RuntimeOpt -func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { - if ref == nil { - ref = &map[string]types.Runtime{} - } - return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} -} - -// Name returns the name of the NamedListOpts in the configuration. -func (o *RuntimeOpt) Name() string { - return o.name -} - -// Set validates and updates the list of Runtimes -func (o *RuntimeOpt) Set(val string) error { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid runtime argument: %s", val) - } - - parts[0] = strings.TrimSpace(parts[0]) - parts[1] = strings.TrimSpace(parts[1]) - if parts[0] == "" || parts[1] == "" { - return fmt.Errorf("invalid runtime argument: %s", val) - } - - parts[0] = strings.ToLower(parts[0]) - if parts[0] == o.stockRuntimeName { - return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) - } - - if _, ok := (*o.values)[parts[0]]; ok { - return fmt.Errorf("runtime '%s' was already defined", parts[0]) - } - - (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} - - return nil -} - -// String returns Runtime values as a string. 
-func (o *RuntimeOpt) String() string { - var out []string - for k := range *o.values { - out = append(out, k) - } - - return fmt.Sprintf("%v", out) -} - -// GetMap returns a map of Runtimes (name: path) -func (o *RuntimeOpt) GetMap() map[string]types.Runtime { - if o.values != nil { - return *o.values - } - - return map[string]types.Runtime{} -} - -// Type returns the type of the option -func (o *RuntimeOpt) Type() string { - return "runtime" -} diff --git a/vendor/github.com/docker/cli/opts/secret.go b/vendor/github.com/docker/cli/opts/secret.go deleted file mode 100644 index a1fde54d9..000000000 --- a/vendor/github.com/docker/cli/opts/secret.go +++ /dev/null @@ -1,98 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "os" - "strconv" - "strings" - - swarmtypes "github.com/docker/docker/api/types/swarm" -) - -// SecretOpt is a Value type for parsing secrets -type SecretOpt struct { - values []*swarmtypes.SecretReference -} - -// Set a new secret value -func (o *SecretOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - options := &swarmtypes.SecretReference{ - File: &swarmtypes.SecretReferenceFileTarget{ - UID: "0", - GID: "0", - Mode: 0444, - }, - } - - // support a simple syntax of --secret foo - if len(fields) == 1 { - options.File.Name = fields[0] - options.SecretName = fields[0] - o.values = append(o.values, options) - return nil - } - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "source", "src": - options.SecretName = value - case "target": - options.File.Name = value - case "uid": - options.File.UID = value - case "gid": - options.File.GID = value - case "mode": - m, err := strconv.ParseUint(value, 0, 32) - if err != nil { - return fmt.Errorf("invalid mode specified: %v", err) - } - - options.File.Mode = os.FileMode(m) - default: - return fmt.Errorf("invalid field in secret request: %s", key) - } - } - - if options.SecretName == "" { - return fmt.Errorf("source is required") - } - - o.values = append(o.values, options) - return nil -} - -// Type returns the type of this option -func (o *SecretOpt) Type() string { - return "secret" -} - -// String returns a string repr of this option -func (o *SecretOpt) String() string { - secrets := []string{} - for _, secret := range o.values { - repr := fmt.Sprintf("%s -> %s", secret.SecretName, secret.File.Name) - secrets = append(secrets, repr) - } - return strings.Join(secrets, ", ") -} - -// Value returns the secret requests -func (o *SecretOpt) Value() []*swarmtypes.SecretReference { - return o.values -} diff --git a/vendor/github.com/docker/cli/opts/throttledevice.go b/vendor/github.com/docker/cli/opts/throttledevice.go deleted file mode 100644 index 0959efae3..000000000 --- a/vendor/github.com/docker/cli/opts/throttledevice.go +++ /dev/null @@ -1,108 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/go-units" -) - -// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error. -type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error) - -// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format. 
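SecretOpt.Set above parses the mode field with strconv.ParseUint(value, 0, 32), so a leading zero is read as octal before being wrapped in os.FileMode. A tiny sketch of that behaviour:

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// "mode=0444" in a --secret flag ends up as an octal file mode.
	m, err := strconv.ParseUint("0444", 0, 32)
	if err != nil {
		panic(err)
	}
	fmt.Println(os.FileMode(m)) // -r--r--r--
}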
-func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - rate, err := units.RAMInBytes(split[1]) - if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) - } - if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) - } - - return &blkiodev.ThrottleDevice{ - Path: split[0], - Rate: uint64(rate), - }, nil -} - -// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format. -func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - rate, err := strconv.ParseUint(split[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) - } - if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) - } - - return &blkiodev.ThrottleDevice{Path: split[0], Rate: rate}, nil -} - -// ThrottledeviceOpt defines a map of ThrottleDevices -type ThrottledeviceOpt struct { - values []*blkiodev.ThrottleDevice - validator ValidatorThrottleFctType -} - -// NewThrottledeviceOpt creates a new ThrottledeviceOpt -func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt { - values := []*blkiodev.ThrottleDevice{} - return ThrottledeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt -func (opt *ThrottledeviceOpt) Set(val string) error { - var value *blkiodev.ThrottleDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns ThrottledeviceOpt values as a string. -func (opt *ThrottledeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to ThrottleDevices. -func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice { - var throttledevice []*blkiodev.ThrottleDevice - throttledevice = append(throttledevice, opt.values...) 
- - return throttledevice -} - -// Type returns the option type -func (opt *ThrottledeviceOpt) Type() string { - return "list" -} diff --git a/vendor/github.com/docker/cli/opts/ulimit.go b/vendor/github.com/docker/cli/opts/ulimit.go deleted file mode 100644 index 5adfe3085..000000000 --- a/vendor/github.com/docker/cli/opts/ulimit.go +++ /dev/null @@ -1,57 +0,0 @@ -package opts - -import ( - "fmt" - - "github.com/docker/go-units" -) - -// UlimitOpt defines a map of Ulimits -type UlimitOpt struct { - values *map[string]*units.Ulimit -} - -// NewUlimitOpt creates a new UlimitOpt -func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { - if ref == nil { - ref = &map[string]*units.Ulimit{} - } - return &UlimitOpt{ref} -} - -// Set validates a Ulimit and sets its name as a key in UlimitOpt -func (o *UlimitOpt) Set(val string) error { - l, err := units.ParseUlimit(val) - if err != nil { - return err - } - - (*o.values)[l.Name] = l - - return nil -} - -// String returns Ulimit values as a string. -func (o *UlimitOpt) String() string { - var out []string - for _, v := range *o.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to Ulimits. -func (o *UlimitOpt) GetList() []*units.Ulimit { - var ulimits []*units.Ulimit - for _, v := range *o.values { - ulimits = append(ulimits, v) - } - - return ulimits -} - -// Type returns the option type -func (o *UlimitOpt) Type() string { - return "ulimit" -} diff --git a/vendor/github.com/docker/cli/opts/weightdevice.go b/vendor/github.com/docker/cli/opts/weightdevice.go deleted file mode 100644 index 46ce9b656..000000000 --- a/vendor/github.com/docker/cli/opts/weightdevice.go +++ /dev/null @@ -1,84 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/docker/api/types/blkiodev" -) - -// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error. -type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error) - -// ValidateWeightDevice validates that the specified string has a valid device-weight format. 
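The ulimit and block-IO options in this hunk all lean on the vendored github.com/docker/go-units package: UlimitOpt.Set calls units.ParseUlimit, and ValidateThrottleBpsDevice converts the rate with units.RAMInBytes. A minimal sketch of the two parses, under those assumptions:

package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// --ulimit nofile=1024:2048 -> soft limit 1024, hard limit 2048.
	u, err := units.ParseUlimit("nofile=1024:2048")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Name, u.Soft, u.Hard) // nofile 1024 2048

	// --device-read-bps /dev/sda:1mb -> rate of 1048576 bytes per second,
	// as ValidateThrottleBpsDevice above computes it.
	rate, err := units.RAMInBytes("1mb")
	if err != nil {
		panic(err)
	}
	fmt.Println(rate) // 1048576
}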
-func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - weight, err := strconv.ParseUint(split[1], 10, 0) - if err != nil { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - if weight > 0 && (weight < 10 || weight > 1000) { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - - return &blkiodev.WeightDevice{ - Path: split[0], - Weight: uint16(weight), - }, nil -} - -// WeightdeviceOpt defines a map of WeightDevices -type WeightdeviceOpt struct { - values []*blkiodev.WeightDevice - validator ValidatorWeightFctType -} - -// NewWeightdeviceOpt creates a new WeightdeviceOpt -func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt { - values := []*blkiodev.WeightDevice{} - return WeightdeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt -func (opt *WeightdeviceOpt) Set(val string) error { - var value *blkiodev.WeightDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns WeightdeviceOpt values as a string. -func (opt *WeightdeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to WeightDevices. -func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice { - return opt.values -} - -// Type returns the option type -func (opt *WeightdeviceOpt) Type() string { - return "list" -} diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap index d99106019..0f48321d4 100644 --- a/vendor/github.com/docker/distribution/.mailmap +++ b/vendor/github.com/docker/distribution/.mailmap @@ -1,9 +1,9 @@ -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland +Stephen J Day Stephen Day +Stephen J Day Stephen Day +Olivier Gambier Olivier Gambier +Brian Bland Brian Bland Brian Bland Brian Bland -Josh Hawn Josh Hawn +Josh Hawn Josh Hawn Richard Scothern Richard Richard Scothern Richard Scothern Andrew Meredith Andrew Meredith @@ -16,3 +16,17 @@ davidli davidli Omer Cohen Omer Cohen Eric Yang Eric Yang Nikita Tarasov Nikita +Yu Wang yuwaMSFT2 +Yu Wang Yu Wang (UC) +Olivier Gambier dmp +Olivier Gambier Olivier +Olivier Gambier Olivier +Elsan Li 李楠 elsanli(李楠) +Rui Cao ruicao +Gwendolynne Barr gbarr01 +Haibing Zhou 周海兵 zhouhaibing089 +Feng Honglin tifayuki +Helen Xie Helen-xie +Mike Brown Mike Brown +Manish Tomar Manish Tomar +Sakeven Jiang sakeven diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS deleted file mode 100644 index 252ff8aa2..000000000 --- a/vendor/github.com/docker/distribution/AUTHORS +++ /dev/null @@ -1,182 +0,0 @@ -a-palchikov -Aaron Lehmann -Aaron Schlesinger -Aaron Vinson -Adam Duke -Adam Enger -Adrian Mouat -Ahmet Alp Balkan -Alex Chan -Alex Elman -Alexey Gladkov -allencloud -amitshukla -Amy Lindburg -Andrew Hsu -Andrew Meredith -Andrew T Nguyen -Andrey Kostov -Andy Goldstein -Anis Elleuch -Anton Tiurin -Antonio Mercado -Antonio Murdaca -Anusha Ragunathan -Arien Holthuizen 
-Arnaud Porterie -Arthur Baars -Asuka Suzuki -Avi Miller -Ayose Cazorla -BadZen -Ben Bodenmiller -Ben Firshman -bin liu -Brian Bland -burnettk -Carson A -Cezar Sa Espinola -Charles Smith -Chris Dillon -cuiwei13 -cyli -Daisuke Fujita -Daniel Huhn -Darren Shepherd -Dave Trombley -Dave Tucker -David Lawrence -David Verhasselt -David Xia -davidli -Dejan Golja -Derek McGowan -Diogo Mónica -DJ Enriquez -Donald Huang -Doug Davis -Edgar Lee -Eric Yang -Fabio Berchtold -Fabio Huser -farmerworking -Felix Yan -Florentin Raud -Frank Chen -Frederick F. Kautz IV -gabriell nascimento -Gleb Schukin -harche -Henri Gomez -Hu Keping -Hua Wang -HuKeping -Ian Babrou -igayoso -Jack Griffin -James Findley -Jason Freidman -Jason Heiss -Jeff Nickoloff -Jess Frazelle -Jessie Frazelle -jhaohai -Jianqing Wang -Jihoon Chung -Joao Fernandes -John Mulhausen -John Starks -Jon Johnson -Jon Poler -Jonathan Boulle -Jordan Liggitt -Josh Chorlton -Josh Hawn -Julien Fernandez -Ke Xu -Keerthan Mala -Kelsey Hightower -Kenneth Lim -Kenny Leung -Li Yi -Liu Hua -liuchang0812 -Lloyd Ramey -Louis Kottmann -Luke Carpenter -Marcus Martins -Mary Anthony -Matt Bentley -Matt Duch -Matt Moore -Matt Robenolt -Matthew Green -Michael Prokop -Michal Minar -Michal Minář -Mike Brown -Miquel Sabaté -Misty Stanley-Jones -Misty Stanley-Jones -Morgan Bauer -moxiegirl -Nathan Sullivan -nevermosby -Nghia Tran -Nikita Tarasov -Noah Treuhaft -Nuutti Kotivuori -Oilbeater -Olivier Gambier -Olivier Jacques -Omer Cohen -Patrick Devine -Phil Estes -Philip Misiowiec -Pierre-Yves Ritschard -Qiao Anran -Randy Barlow -Richard Scothern -Rodolfo Carvalho -Rusty Conover -Sean Boran -Sebastiaan van Stijn -Sebastien Coavoux -Serge Dubrouski -Sharif Nassar -Shawn Falkner-Horine -Shreyas Karnik -Simon Thulbourn -spacexnice -Spencer Rinehart -Stan Hu -Stefan Majewsky -Stefan Weil -Stephen J Day -Sungho Moon -Sven Dowideit -Sylvain Baubeau -Ted Reed -tgic -Thomas Sjögren -Tianon Gravi -Tibor Vass -Tonis Tiigi -Tony Holdstock-Brown -Trevor Pounds -Troels Thomsen -Victor Vieux -Victoria Bialas -Vincent Batts -Vincent Demeester -Vincent Giersch -W. 
Trevor King -weiyuan.yl -xg.song -xiekeyang -Yann ROBERT -yaoyao.xyy -yuexiao-wang -yuzou -zhouhaibing089 -姜继忠 diff --git a/vendor/github.com/docker/distribution/CHANGELOG.md b/vendor/github.com/docker/distribution/CHANGELOG.md deleted file mode 100644 index e7b16b3c2..000000000 --- a/vendor/github.com/docker/distribution/CHANGELOG.md +++ /dev/null @@ -1,108 +0,0 @@ -# Changelog - -## 2.6.0 (2017-01-18) - -#### Storage -- S3: fixed bug in delete due to read-after-write inconsistency -- S3: allow EC2 IAM roles to be used when authorizing region endpoints -- S3: add Object ACL Support -- S3: fix delete method's notion of subpaths -- S3: use multipart upload API in `Move` method for performance -- S3: add v2 signature signing for legacy S3 clones -- Swift: add simple heuristic to detect incomplete DLOs during read ops -- Swift: support different user and tenant domains -- Swift: bulk deletes in chunks -- Aliyun OSS: fix delete method's notion of subpaths -- Aliyun OSS: optimize data copy after upload finishes -- Azure: close leaking response body -- Fix storage drivers dropping non-EOF errors when listing repositories -- Compare path properly when listing repositories in catalog -- Add a foreign layer URL host whitelist -- Improve catalog enumerate runtime - -#### Registry -- Export `storage.CreateOptions` in top-level package -- Enable notifications to endpoints that use self-signed certificates -- Properly validate multi-URL foreign layers -- Add control over validation of URLs in pushed manifests -- Proxy mode: fix socket leak when pull is cancelled -- Tag service: properly handle error responses on HEAD request -- Support for custom authentication URL in proxying registry -- Add configuration option to disable access logging -- Add notification filtering by target media type -- Manifest: `References()` returns all children -- Honor `X-Forwarded-Port` and Forwarded headers -- Reference: Preserve tag and digest in With* functions -- Add policy configuration for enforcing repository classes - -#### Client -- Changes the client Tags `All()` method to follow links -- Allow registry clients to connect via HTTP2 -- Better handling of OAuth errors in client - -#### Spec -- Manifest: clarify relationship between urls and foreign layers -- Authorization: add support for repository classes - -#### Manifest -- Override media type returned from `Stat()` for existing manifests -- Add plugin mediatype to distribution manifest - -#### Docs -- Document `TOOMANYREQUESTS` error code -- Document required Let's Encrypt port -- Improve documentation around implementation of OAuth2 -- Improve documentation for configuration - -#### Auth -- Add support for registry type in scope -- Add support for using v2 ping challenges for v1 -- Add leeway to JWT `nbf` and `exp` checking -- htpasswd: dynamically parse htpasswd file -- Fix missing auth headers with PATCH HTTP request when pushing to default port - -#### Dockerfile -- Update to go1.7 -- Reorder Dockerfile steps for better layer caching - -#### Notes - -Documentation has moved to the documentation repository at -`github.com/docker/docker.github.io/tree/master/registry` - -The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` ing. 
- - -## 2.5.0 (2016-06-14) - -#### Storage -- Ensure uploads directory is cleaned after upload is committed -- Add ability to cap concurrent operations in filesystem driver -- S3: Add 'us-gov-west-1' to the valid region list -- Swift: Handle ceph not returning Last-Modified header for HEAD requests -- Add redirect middleware - -#### Registry -- Add support for blobAccessController middleware -- Add support for layers from foreign sources -- Remove signature store -- Add support for Let's Encrypt -- Correct yaml key names in configuration - -#### Client -- Add option to get content digest from manifest get - -#### Spec -- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported -- Clarify API documentation around catalog fetch behavior - -#### API -- Support returning HTTP 429 (Too Many Requests) - -#### Documentation -- Update auth documentation examples to show "expires in" as int - -#### Docker Image -- Use Alpine Linux as base image - - diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile index e0d74684a..612e62ce1 100644 --- a/vendor/github.com/docker/distribution/Dockerfile +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -1,20 +1,22 @@ -FROM golang:1.10-alpine +FROM golang:1.11-alpine AS build ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution ENV DOCKER_BUILDTAGS include_oss include_gcs ARG GOOS=linux ARG GOARCH=amd64 +ARG GOARM=6 RUN set -ex \ - && apk add --no-cache make git + && apk add --no-cache make git file WORKDIR $DISTRIBUTION_DIR COPY . $DISTRIBUTION_DIR -COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml - -RUN make PREFIX=/go clean binaries +RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked" +FROM alpine +COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml +COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry VOLUME ["/var/lib/registry"] EXPOSE 5000 ENTRYPOINT ["registry"] diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile index df89bb0c1..4635c6eca 100644 --- a/vendor/github.com/docker/distribution/Makefile +++ b/vendor/github.com/docker/distribution/Makefile @@ -43,9 +43,6 @@ TESTFLAGS_PARALLEL ?= 8 all: binaries -AUTHORS: .mailmap .git/HEAD - git log --format='%aN <%aE>' | sort -fu > $@ - # This only needs to be generated by hand when cutting full releases. version/version.go: @echo "$(WHALE) $@" diff --git a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md deleted file mode 100644 index 73eba5a87..000000000 --- a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md +++ /dev/null @@ -1,44 +0,0 @@ -## Registry Release Checklist - -10. Compile release notes detailing features and since the last release. - - Update the `CHANGELOG.md` file and create a PR to master with the updates. -Once that PR has been approved by maintainers the change may be cherry-picked -to the release branch (new release branches may be forked from this commit). - -20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go` - -30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files. - -``` -make AUTHORS -``` - -40. Create a signed tag. - - Distribution uses semantic versioning. Tags are of the format -`vx.y.z[-rcn]`. 
You will need PGP installed and a PGP key which has been added -to your Github account. The comment for the tag should include the release -notes, use previous tags as a guide for formatting consistently. Run -`git tag -s vx.y.z[-rcn]` to create tag and `git -v vx.y.z[-rcn]` to verify tag, -check comment and correct commit hash. - -50. Push the signed tag - -60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox. - -70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request. - -80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary. -e.g. to release `2.3.1` - - `2.3.1 (new)` - - `2.3.0 -> 2.3.0` can be removed - - `2 -> 2.3.1` - - `2.3 -> 2.3.1` - -90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images. - diff --git a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go index f4e915eed..54c8f3c94 100644 --- a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go +++ b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go @@ -163,7 +163,7 @@ func FromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType st }, } - m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) + m.Manifests = make([]ManifestDescriptor, len(descriptors)) copy(m.Manifests, descriptors) deserialized := DeserializedManifestList{ @@ -177,7 +177,7 @@ func FromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType st // UnmarshalJSON populates a new ManifestList struct from JSON data. func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { - m.canonical = make([]byte, len(b), len(b)) + m.canonical = make([]byte, len(b)) // store manifest list in canonical copy(m.canonical, b) diff --git a/vendor/github.com/docker/distribution/manifest/schema1/manifest.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go index 5a06b54bc..9fef4dc7e 100644 --- a/vendor/github.com/docker/distribution/manifest/schema1/manifest.go +++ b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go @@ -108,7 +108,7 @@ type SignedManifest struct { // UnmarshalJSON populates a new SignedManifest struct from JSON data. 
func (sm *SignedManifest) UnmarshalJSON(b []byte) error { - sm.all = make([]byte, len(b), len(b)) + sm.all = make([]byte, len(b)) // store manifest and signatures in all copy(sm.all, b) @@ -124,7 +124,7 @@ func (sm *SignedManifest) UnmarshalJSON(b []byte) error { } // sm.Canonical stores the canonical manifest JSON - sm.Canonical = make([]byte, len(bytes), len(bytes)) + sm.Canonical = make([]byte, len(bytes)) copy(sm.Canonical, bytes) // Unmarshal canonical JSON into Manifest object diff --git a/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go index a4f6032cd..0f1d386aa 100644 --- a/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go +++ b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go @@ -58,7 +58,7 @@ func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Man func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { r, ok := d.(Reference) if !ok { - return fmt.Errorf("Unable to add non-reference type to v1 builder") + return fmt.Errorf("unable to add non-reference type to v1 builder") } // Entries need to be prepended diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go index ee29438fe..41f480292 100644 --- a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go +++ b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go @@ -106,7 +106,7 @@ func FromStruct(m Manifest) (*DeserializedManifest, error) { // UnmarshalJSON populates a new Manifest struct from JSON data. func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { - m.canonical = make([]byte, len(b), len(b)) + m.canonical = make([]byte, len(b)) // store manifest in canonical copy(m.canonical, b) diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go index 1816baea1..8f84a220a 100644 --- a/vendor/github.com/docker/distribution/manifests.go +++ b/vendor/github.com/docker/distribution/manifests.go @@ -87,7 +87,7 @@ func ManifestMediaTypes() (mediaTypes []string) { // UnmarshalFunc implements manifest unmarshalling a given MediaType type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) -var mappings = make(map[string]UnmarshalFunc, 0) +var mappings = make(map[string]UnmarshalFunc) // UnmarshalManifest looks up manifest unmarshal functions based on // MediaType diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go index 2d71fc5e9..b3dfb7a6d 100644 --- a/vendor/github.com/docker/distribution/reference/normalize.go +++ b/vendor/github.com/docker/distribution/reference/normalize.go @@ -56,6 +56,35 @@ func ParseNormalizedNamed(s string) (Named, error) { return named, nil } +// ParseDockerRef normalizes the image reference following the docker convention. This is added +// mainly for backward compatibility. +// The reference returned can only be either tagged or digested. For reference contains both tag +// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ +// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. 
+func ParseDockerRef(ref string) (Named, error) { + named, err := ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, ok := named.(NamedTagged); ok { + if canonical, ok := named.(Canonical); ok { + // The reference is both tagged and digested, only + // return digested. + newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err + } + newCanonical, err := WithDigest(newNamed, canonical.Digest()) + if err != nil { + return nil, err + } + return newCanonical, nil + } + } + return TagNameOnly(named), nil +} + // splitDockerDomain splits a repository name to domain and remotename string. // If no valid domain is found, the default domain is used. Repository name // needs to be already validated before. diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index 2f66cca87..8c0c23b2f 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -205,7 +205,7 @@ func Parse(s string) (Reference, error) { var repo repository nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if nameMatch != nil && len(nameMatch) == 3 { + if len(nameMatch) == 3 { repo.domain = nameMatch[1] repo.path = nameMatch[2] } else { diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go index 6d9bb4b62..4c35b879a 100644 --- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go +++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go @@ -207,11 +207,11 @@ func (errs Errors) MarshalJSON() ([]byte, error) { for _, daErr := range errs { var err Error - switch daErr.(type) { + switch daErr := daErr.(type) { case ErrorCode: - err = daErr.(ErrorCode).WithDetail(nil) + err = daErr.WithDetail(nil) case Error: - err = daErr.(Error) + err = daErr default: err = ErrorCodeUnknown.WithDetail(daErr) diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go index 1337bdb12..3c3ec9893 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go @@ -252,15 +252,3 @@ func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { u.RawQuery = merged.Encode() return u } - -// appendValues appends the parameters to the url. Panics if the string is not -// a url. 
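ParseDockerRef, added above, is the one-stop normalizer: it expands familiar names to their fully qualified form and, when a reference carries both a tag and a digest, keeps only the digest. A minimal sketch using the reference package as vendored here (the digest below is the one from the function's own doc comment):

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Familiar name -> fully qualified, tagged reference.
	n, err := reference.ParseDockerRef("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(n.String()) // docker.io/library/ubuntu:latest

	// Tag + digest -> digest only.
	d, err := reference.ParseDockerRef("busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.String()) // docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
}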
-func appendValues(u string, values ...url.Values) string { - up, err := url.Parse(u) - - if err != nil { - panic(err) // should never happen - } - - return appendValuesURL(up, values...).String() -} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go index 6e3f1ccc4..fe238210c 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go @@ -117,8 +117,8 @@ func init() { var t octetType isCtl := c <= 31 || c == 127 isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { t |= isSpace } if isChar && !isCtl && !isSeparator { diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf index 7aec1d05b..12f71672f 100644 --- a/vendor/github.com/docker/distribution/vendor.conf +++ b/vendor/github.com/docker/distribution/vendor.conf @@ -7,7 +7,7 @@ github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 -github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 +github.com/denverdino/aliyungo 6df11717a253d9c7d4141f9af4deaa7c580cd531 github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 @@ -28,6 +28,7 @@ github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd +github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index 4ef26fa6c..f4f5c09f8 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -329,11 +329,12 @@ type Resources struct { DeviceCgroupRules []string // List of rule to be added to the device cgroup DiskQuota int64 // Disk limit (in bytes) KernelMemory int64 // Kernel memory limit (in bytes) + KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) MemoryReservation int64 // Memory soft limit (in bytes) MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap MemorySwappiness *int64 // Tuning container memory swappiness behaviour OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit int64 // Setting pids limit for a container + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to 
not change. Ulimits []*units.Ulimit // List of ulimits to be set in the container // Applicable to Windows @@ -369,9 +370,10 @@ type HostConfig struct { // Applicable to UNIX platforms CapAdd strslice.StrSlice // List of kernel capabilities to add to the container CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set) + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for ExtraHosts []string // List of extra hosts GroupAdd []string // List of additional groups that the container process will run as IpcMode IpcMode // IPC namespace to use for the container diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index a41e3d8d9..d8f19ae22 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -323,6 +323,22 @@ func (args Args) WalkValues(field string, op func(value string) error) error { return nil } +// Clone returns a copy of args. +func (args Args) Clone() (newArgs Args) { + newArgs.fields = make(map[string]map[string]bool, len(args.fields)) + for k, m := range args.fields { + var mm map[string]bool + if m != nil { + mm = make(map[string]bool, len(m)) + for kk, v := range m { + mm[kk] = v + } + } + newArgs.fields[k] = mm + } + return newArgs +} + func deprecatedArgs(d map[string][]string) map[string]map[string]bool { m := map[string]map[string]bool{} for k, v := range d { diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index 3fef974df..ab4446b38 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -79,7 +79,8 @@ const ( // BindOptions defines options specific to mounts of type "bind". type BindOptions struct { - Propagation Propagation `json:",omitempty"` + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` } // VolumeOptions represents the options for a mount of type volume. diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go index ccb448f23..71e97338f 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -112,12 +112,13 @@ type ConfigReference struct { } var acceptedFilters = map[string]bool{ - "driver": true, - "type": true, - "name": true, - "id": true, - "label": true, - "scope": true, + "dangling": true, + "driver": true, + "id": true, + "label": true, + "name": true, + "scope": true, + "type": true, } // ValidateFilters validates the list of filter args with the available filters. 
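The filters.Args.Clone method added in the hunk above deep-copies the per-key value maps, so a caller can adjust a copy (for example, to add the newly accepted "dangling" network filter) without mutating the original Args value. A minimal sketch of how it might be exercised, assuming the vendored github.com/docker/docker/api/types/filters package as patched here; the label value and printed counts are illustrative only:

```
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Original filter set with a single "label" filter.
	args := filters.NewArgs()
	args.Add("label", "com.example.vendor=acme")

	// Clone() copies the underlying maps, so mutating the clone
	// leaves the original filter set untouched.
	cloned := args.Clone()
	cloned.Add("dangling", "true")

	fmt.Println(args.Len(), cloned.Len()) // expected: 1 2
}
```
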
diff --git a/vendor/github.com/docker/docker/api/types/seccomp.go b/vendor/github.com/docker/docker/api/types/seccomp.go index 67a41e1a8..2259c6be1 100644 --- a/vendor/github.com/docker/docker/api/types/seccomp.go +++ b/vendor/github.com/docker/docker/api/types/seccomp.go @@ -77,8 +77,9 @@ type Arg struct { // Filter is used to conditionally apply Seccomp rules type Filter struct { - Caps []string `json:"caps,omitempty"` - Arches []string `json:"arches,omitempty"` + Caps []string `json:"caps,omitempty"` + Arches []string `json:"arches,omitempty"` + MinKernel string `json:"minKernel,omitempty"` } // Syscall is used to match a group of syscalls in Seccomp diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go index a1555cf43..16202ccce 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/config.go +++ b/vendor/github.com/docker/docker/api/types/swarm/config.go @@ -27,9 +27,14 @@ type ConfigReferenceFileTarget struct { Mode os.FileMode } +// ConfigReferenceRuntimeTarget is a target for a config specifying that it +// isn't mounted into the container but instead has some other purpose. +type ConfigReferenceRuntimeTarget struct{} + // ConfigReference is a reference to a config in swarm type ConfigReference struct { - File *ConfigReferenceFileTarget + File *ConfigReferenceFileTarget `json:",omitempty"` + Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` ConfigID string ConfigName string } diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index 151211ff5..48190c176 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -33,6 +33,7 @@ type SELinuxContext struct { // CredentialSpec for managed service account (Windows only) type CredentialSpec struct { + Config string File string Registry string } @@ -71,4 +72,5 @@ type ContainerSpec struct { Secrets []*SecretReference `json:",omitempty"` Configs []*ConfigReference `json:",omitempty"` Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index b742cf1bf..484cd0be7 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -14,6 +14,7 @@ type ClusterInfo struct { RootRotationInProgress bool DefaultAddrPool []string SubnetSize uint32 + DataPathPort uint32 } // Swarm represents a swarm. @@ -153,6 +154,7 @@ type InitRequest struct { ListenAddr string AdvertiseAddr string DataPathAddr string + DataPathPort uint32 ForceNewCluster bool Spec Spec AutoLockManagers bool diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index b35605d12..d5a57df5d 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -127,6 +127,7 @@ type ResourceRequirements struct { type Placement struct { Constraints []string `json:",omitempty"` Preferences []PlacementPreference `json:",omitempty"` + MaxReplicas uint64 `json:",omitempty"` // Platforms stores all the platforms that the image can run on. // This field is used in the platform filter for scheduling. 
If empty, diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index a8fae3ba3..a39ffcb7b 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -158,10 +158,12 @@ type Info struct { MemoryLimit bool SwapLimit bool KernelMemory bool + KernelMemoryTCP bool CPUCfsPeriod bool `json:"CpuCfsPeriod"` CPUCfsQuota bool `json:"CpuCfsQuota"` CPUShares bool CPUSet bool + PidsLimit bool IPv4Forwarding bool BridgeNfIptables bool BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go index 6169c2bc6..a28881caf 100644 --- a/vendor/github.com/docker/docker/errdefs/helpers.go +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -12,8 +12,8 @@ func (e errNotFound) Cause() error { // NotFound is a helper to create an error of the class with the same name from any error type func NotFound(err error) error { - if err == nil { - return nil + if err == nil || IsNotFound(err) { + return err } return errNotFound{err} } @@ -28,8 +28,8 @@ func (e errInvalidParameter) Cause() error { // InvalidParameter is a helper to create an error of the class with the same name from any error type func InvalidParameter(err error) error { - if err == nil { - return nil + if err == nil || IsInvalidParameter(err) { + return err } return errInvalidParameter{err} } @@ -44,8 +44,8 @@ func (e errConflict) Cause() error { // Conflict is a helper to create an error of the class with the same name from any error type func Conflict(err error) error { - if err == nil { - return nil + if err == nil || IsConflict(err) { + return err } return errConflict{err} } @@ -60,8 +60,8 @@ func (e errUnauthorized) Cause() error { // Unauthorized is a helper to create an error of the class with the same name from any error type func Unauthorized(err error) error { - if err == nil { - return nil + if err == nil || IsUnauthorized(err) { + return err } return errUnauthorized{err} } @@ -76,6 +76,9 @@ func (e errUnavailable) Cause() error { // Unavailable is a helper to create an error of the class with the same name from any error type func Unavailable(err error) error { + if err == nil || IsUnavailable(err) { + return err + } return errUnavailable{err} } @@ -89,8 +92,8 @@ func (e errForbidden) Cause() error { // Forbidden is a helper to create an error of the class with the same name from any error type func Forbidden(err error) error { - if err == nil { - return nil + if err == nil || IsForbidden(err) { + return err } return errForbidden{err} } @@ -105,8 +108,8 @@ func (e errSystem) Cause() error { // System is a helper to create an error of the class with the same name from any error type func System(err error) error { - if err == nil { - return nil + if err == nil || IsSystem(err) { + return err } return errSystem{err} } @@ -121,8 +124,8 @@ func (e errNotModified) Cause() error { // NotModified is a helper to create an error of the class with the same name from any error type func NotModified(err error) error { - if err == nil { - return nil + if err == nil || IsNotModified(err) { + return err } return errNotModified{err} } @@ -137,8 +140,8 @@ func (e errAlreadyExists) Cause() error { // AlreadyExists is a helper to create an error of the class with the same name from any error type func AlreadyExists(err error) error { - if err == nil { - return nil + if err == nil || IsAlreadyExists(err) { + return err } 
return errAlreadyExists{err} } @@ -153,8 +156,8 @@ func (e errNotImplemented) Cause() error { // NotImplemented is a helper to create an error of the class with the same name from any error type func NotImplemented(err error) error { - if err == nil { - return nil + if err == nil || IsNotImplemented(err) { + return err } return errNotImplemented{err} } @@ -169,8 +172,8 @@ func (e errUnknown) Cause() error { // Unknown is a helper to create an error of the class with the same name from any error type func Unknown(err error) error { - if err == nil { - return nil + if err == nil || IsUnknown(err) { + return err } return errUnknown{err} } @@ -185,8 +188,8 @@ func (e errCancelled) Cause() error { // Cancelled is a helper to create an error of the class with the same name from any error type func Cancelled(err error) error { - if err == nil { - return nil + if err == nil || IsCancelled(err) { + return err } return errCancelled{err} } @@ -201,8 +204,8 @@ func (e errDeadline) Cause() error { // Deadline is a helper to create an error of the class with the same name from any error type func Deadline(err error) error { - if err == nil { - return nil + if err == nil || IsDeadline(err) { + return err } return errDeadline{err} } @@ -217,8 +220,8 @@ func (e errDataLoss) Cause() error { // DataLoss is a helper to create an error of the class with the same name from any error type func DataLoss(err error) error { - if err == nil { - return nil + if err == nil || IsDataLoss(err) { + return err } return errDataLoss{err} } diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index 070dccb75..bb623fa85 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -660,11 +660,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - if err == syscall.ENOTSUP { + if err == syscall.ENOTSUP || err == syscall.EPERM { // We ignore errors here because not all graphdrivers support // xattrs *cough* old versions of AUFS *cough*. However only // ENOTSUP should be emitted in that case, otherwise we still // bail. + // EPERM occurs if modifying xattrs is not allowed. This can + // happen when running in userns with restrictions (ChromeOS). errors = append(errors, err.Error()) continue } @@ -743,7 +745,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) compressWriter, options.ChownOpts, ) - ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) defer func() { // Make sure to check the error on Close. @@ -901,7 +903,7 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err var dirs []*tar.Header idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) rootIDs := idMapping.RootPair() - whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) + whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) // Iterate through the files in the archive. 
loop: diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go index 970d4d068..0601f7b0d 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -2,22 +2,29 @@ package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" + "fmt" + "io/ioutil" "os" "path/filepath" "strings" + "syscall" + "github.com/containerd/continuity/fs" "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" "golang.org/x/sys/unix" ) -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { +func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter { if format == OverlayWhiteoutFormat { - return overlayWhiteoutConverter{} + return overlayWhiteoutConverter{inUserNS: inUserNS} } return nil } -type overlayWhiteoutConverter struct{} +type overlayWhiteoutConverter struct { + inUserNS bool +} func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { // convert whiteouts to AUFS format @@ -61,13 +68,22 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os return } -func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { +func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { base := filepath.Base(path) dir := filepath.Dir(path) // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay if base == WhiteoutOpaqueDir { err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + if err != nil { + if c.inUserNS { + if err = replaceDirWithOverlayOpaque(dir); err != nil { + return false, errors.Wrapf(err, "replaceDirWithOverlayOpaque(%q) failed", dir) + } + } else { + return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir) + } + } // don't write the file itself return false, err } @@ -78,7 +94,19 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, originalPath := filepath.Join(dir, originalBase) if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { - return false, err + if c.inUserNS { + // Ubuntu and a few distros support overlayfs in userns. + // + // Although we can't call mknod directly in userns (at least on bionic kernel 4.15), + // we can still create 0,0 char device using mknodChar0Overlay(). + // + // NOTE: we don't need this hack for the containerd snapshotter+unpack model. + if err := mknodChar0Overlay(originalPath); err != nil { + return false, errors.Wrapf(err, "failed to mknodChar0UserNS(%q)", originalPath) + } + } else { + return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath) + } } if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { return false, err @@ -90,3 +118,144 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, return true, nil } + +// mknodChar0Overlay creates 0,0 char device by mounting overlayfs and unlinking. +// This function can be used for creating 0,0 char device in userns on Ubuntu. 
+// +// Steps: +// * Mkdir lower,upper,merged,work +// * Create lower/dummy +// * Mount overlayfs +// * Unlink merged/dummy +// * Unmount overlayfs +// * Make sure a 0,0 char device is created as upper/dummy +// * Rename upper/dummy to cleansedOriginalPath +func mknodChar0Overlay(cleansedOriginalPath string) error { + dir := filepath.Dir(cleansedOriginalPath) + tmp, err := ioutil.TempDir(dir, "mc0o") + if err != nil { + return errors.Wrapf(err, "failed to create a tmp directory under %s", dir) + } + defer os.RemoveAll(tmp) + lower := filepath.Join(tmp, "l") + upper := filepath.Join(tmp, "u") + work := filepath.Join(tmp, "w") + merged := filepath.Join(tmp, "m") + for _, s := range []string{lower, upper, work, merged} { + if err := os.MkdirAll(s, 0700); err != nil { + return errors.Wrapf(err, "failed to mkdir %s", s) + } + } + dummyBase := "d" + lowerDummy := filepath.Join(lower, dummyBase) + if err := ioutil.WriteFile(lowerDummy, []byte{}, 0600); err != nil { + return errors.Wrapf(err, "failed to create a dummy lower file %s", lowerDummy) + } + mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work) + // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead. + if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil { + return errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged) + } + mergedDummy := filepath.Join(merged, dummyBase) + if err := os.Remove(mergedDummy); err != nil { + syscall.Unmount(merged, 0) + return errors.Wrapf(err, "failed to unlink %s", mergedDummy) + } + if err := syscall.Unmount(merged, 0); err != nil { + return errors.Wrapf(err, "failed to unmount %s", merged) + } + upperDummy := filepath.Join(upper, dummyBase) + if err := isChar0(upperDummy); err != nil { + return err + } + if err := os.Rename(upperDummy, cleansedOriginalPath); err != nil { + return errors.Wrapf(err, "failed to rename %s to %s", upperDummy, cleansedOriginalPath) + } + return nil +} + +func isChar0(path string) error { + osStat, err := os.Stat(path) + if err != nil { + return errors.Wrapf(err, "failed to stat %s", path) + } + st, ok := osStat.Sys().(*syscall.Stat_t) + if !ok { + return errors.Errorf("got unsupported stat for %s", path) + } + if os.FileMode(st.Mode)&syscall.S_IFMT != syscall.S_IFCHR { + return errors.Errorf("%s is not a character device, got mode=%d", path, st.Mode) + } + if st.Rdev != 0 { + return errors.Errorf("%s is not a 0,0 character device, got Rdev=%d", path, st.Rdev) + } + return nil +} + +// replaceDirWithOverlayOpaque replaces path with a new directory with trusted.overlay.opaque +// xattr. The contents of the directory are preserved. +func replaceDirWithOverlayOpaque(path string) error { + if path == "/" { + return errors.New("replaceDirWithOverlayOpaque: path must not be \"/\"") + } + dir := filepath.Dir(path) + tmp, err := ioutil.TempDir(dir, "rdwoo") + if err != nil { + return errors.Wrapf(err, "failed to create a tmp directory under %s", dir) + } + defer os.RemoveAll(tmp) + // newPath is a new empty directory crafted with trusted.overlay.opaque xattr. + // we copy the content of path into newPath, remove path, and rename newPath to path. 
+ newPath, err := createDirWithOverlayOpaque(tmp) + if err != nil { + return errors.Wrapf(err, "createDirWithOverlayOpaque(%q) failed", tmp) + } + if err := fs.CopyDir(newPath, path); err != nil { + return errors.Wrapf(err, "CopyDir(%q, %q) failed", newPath, path) + } + if err := os.RemoveAll(path); err != nil { + return err + } + return os.Rename(newPath, path) +} + +// createDirWithOverlayOpaque creates a directory with trusted.overlay.opaque xattr, +// without calling setxattr, so as to allow creating opaque dir in userns on Ubuntu. +func createDirWithOverlayOpaque(tmp string) (string, error) { + lower := filepath.Join(tmp, "l") + upper := filepath.Join(tmp, "u") + work := filepath.Join(tmp, "w") + merged := filepath.Join(tmp, "m") + for _, s := range []string{lower, upper, work, merged} { + if err := os.MkdirAll(s, 0700); err != nil { + return "", errors.Wrapf(err, "failed to mkdir %s", s) + } + } + dummyBase := "d" + lowerDummy := filepath.Join(lower, dummyBase) + if err := os.MkdirAll(lowerDummy, 0700); err != nil { + return "", errors.Wrapf(err, "failed to create a dummy lower directory %s", lowerDummy) + } + mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work) + // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead. + if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil { + return "", errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged) + } + mergedDummy := filepath.Join(merged, dummyBase) + if err := os.Remove(mergedDummy); err != nil { + syscall.Unmount(merged, 0) + return "", errors.Wrapf(err, "failed to rmdir %s", mergedDummy) + } + // upperDummy becomes a 0,0-char device file here + if err := os.Mkdir(mergedDummy, 0700); err != nil { + syscall.Unmount(merged, 0) + return "", errors.Wrapf(err, "failed to mkdir %s", mergedDummy) + } + // upperDummy becomes a directory with trusted.overlay.opaque xattr + // (but can't be verified in userns) + if err := syscall.Unmount(merged, 0); err != nil { + return "", errors.Wrapf(err, "failed to unmount %s", merged) + } + upperDummy := filepath.Join(upper, dummyBase) + return upperDummy, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go index 462dfc632..65a73354c 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -2,6 +2,6 @@ package archive // import "github.com/docker/docker/pkg/archive" -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { +func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter { return nil } diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go index 43734db5b..aedb91b03 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -63,12 +63,16 @@ func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } func (c changesByPath) Len() int { return len(c) } func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } -// Gnu tar and the go tar writer don't have sub-second mtime -// precision, which is problematic when we apply changes via tar -// files, we handle this by comparing for exact times, *or* same +// Gnu tar doesn't have sub-second mtime precision. 
The go tar +// writer (1.10+) does when using PAX format, but we round times to seconds +// to ensure archives have the same hashes for backwards compatibility. +// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4. +// +// Non-sub-second is problematic when we apply changes via tar +// files. We handle this by comparing for exact times, *or* same // second count and either a or b having exactly 0 nanoseconds func sameFsTime(a, b time.Time) bool { - return a == b || + return a.Equal(b) || (a.Unix() == b.Unix() && (a.Nanosecond() == 0 || b.Nanosecond() == 0)) } diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go index c06a209d8..06217b716 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -16,7 +16,13 @@ func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { oldStat.UID() != newStat.UID() || oldStat.GID() != newStat.GID() || oldStat.Rdev() != newStat.Rdev() || - // Don't look at size for dirs, its not a good measure of change + // Don't look at size or modification time for dirs, its not a good + // measure of change. See https://github.com/moby/moby/issues/9874 + // for a description of the issue with modification time, and + // https://github.com/moby/moby/pull/11422 for the change. + // (Note that in the Windows implementation of this function, + // modification time IS taken as a change). See + // https://github.com/moby/moby/pull/37982 for more information. (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { return true diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go index 6555c0136..9906685e4 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go @@ -7,9 +7,13 @@ import ( ) func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Note there is slight difference between the Linux and Windows + // implementations here. Due to https://github.com/moby/moby/issues/9874, + // and the fix at https://github.com/moby/moby/pull/11422, Linux does not + // consider a change to the directory time as a change. Windows on NTFS + // does. See https://github.com/moby/moby/pull/37982 for more information. - // Don't look at size for dirs, its not a good measure of change - if oldStat.Mtim() != newStat.Mtim() || + if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) || oldStat.Mode() != newStat.Mode() || oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { return true diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go index d0f13ca79..57fddac07 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -336,6 +336,14 @@ func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.Read return } + // srcContent tar stream, as served by TarWithOptions(), is + // definitely in PAX format, but tar.Next() mistakenly guesses it + // as USTAR, which creates a problem: if the newBase is >100 + // characters long, WriteHeader() returns an error like + // "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...". 
+ // + // To fix, set the format to PAX here. See docker/for-linux issue #484. + hdr.Format = tar.FormatPAX hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) if hdr.Typeflag == tar.TypeLink { hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index 6883ba132..146e21fe1 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -240,11 +240,13 @@ func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decomp dest = filepath.Clean(dest) // We need to be able to set any perms - oldmask, err := system.Umask(0) - if err != nil { - return 0, err + if runtime.GOOS != "windows" { + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) } - defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform if decompress { decompLayer, err := DecompressStream(layer) diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go index 28cad499a..34f1c726f 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -106,7 +106,7 @@ func (pm *PatternMatcher) Patterns() []*Pattern { return pm.patterns } -// Pattern defines a single regexp used used to filter file paths. +// Pattern defines a single regexp used to filter file paths. type Pattern struct { cleanedPattern string dirs []string diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go index ee15ed52b..47ecd0c09 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -1,7 +1,10 @@ package homedir // import "github.com/docker/docker/pkg/homedir" import ( + "errors" "os" + "path/filepath" + "strings" "github.com/docker/docker/pkg/idtools" ) @@ -19,3 +22,88 @@ func GetStatic() (string, error) { } return usr.Home, nil } + +// GetRuntimeDir returns XDG_RUNTIME_DIR. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. +// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetRuntimeDir() (string, error) { + if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { + return xdgRuntimeDir, nil + } + return "", errors.New("could not get XDG_RUNTIME_DIR") +} + +// StickRuntimeDirContents sets the sticky bit on files that are under +// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. +// +// StickyRuntimeDir returns slice of sticked files. +// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. 
+// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func StickRuntimeDirContents(files []string) ([]string, error) { + runtimeDir, err := GetRuntimeDir() + if err != nil { + // ignore error if runtimeDir is empty + return nil, nil + } + runtimeDir, err = filepath.Abs(runtimeDir) + if err != nil { + return nil, err + } + var sticked []string + for _, f := range files { + f, err = filepath.Abs(f) + if err != nil { + return sticked, err + } + if strings.HasPrefix(f, runtimeDir+"/") { + if err = stick(f); err != nil { + return sticked, err + } + sticked = append(sticked, f) + } + } + return sticked, nil +} + +func stick(f string) error { + st, err := os.Stat(f) + if err != nil { + return err + } + m := st.Mode() + m |= os.ModeSticky + return os.Chmod(f, m) +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go index 75ada2fe5..f0a363ded 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -11,3 +11,23 @@ import ( func GetStatic() (string, error) { return "", errors.New("homedir.GetStatic() is not supported on this system") } + +// GetRuntimeDir is unsupported on non-linux system. +func GetRuntimeDir() (string, error) { + return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") +} + +// StickRuntimeDirContents is unsupported on non-linux system. +func StickRuntimeDirContents(files []string) ([]string, error) { + return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") +} + +// GetDataHome is unsupported on non-linux system. +func GetDataHome() (string, error) { + return "", errors.New("homedir.GetDataHome() is not supported on this system") +} + +// GetConfigHome is unsupported on non-linux system. 
+func GetConfigHome() (string, error) { + return "", errors.New("homedir.GetConfigHome() is not supported on this system") +} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go index dd95f3670..a68b566ce 100644 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -4,13 +4,12 @@ import ( "encoding/json" "fmt" "io" - "os" "strings" "time" - "github.com/Nvveen/Gotty" "github.com/docker/docker/pkg/term" "github.com/docker/go-units" + "github.com/morikuni/aec" ) // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to @@ -151,60 +150,23 @@ type JSONMessage struct { Aux *json.RawMessage `json:"aux,omitempty"` } -/* Satisfied by gotty.TermInfo as well as noTermInfo from below */ -type termInfo interface { - Parse(attr string, params ...interface{}) (string, error) +func clearLine(out io.Writer) { + eraseMode := aec.EraseModes.All + cl := aec.EraseLine(eraseMode) + fmt.Fprint(out, cl) } -type noTermInfo struct{} // canary used when no terminfo. - -func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) { - return "", fmt.Errorf("noTermInfo") -} - -func clearLine(out io.Writer, ti termInfo) { - // el2 (clear whole line) is not exposed by terminfo. - - // First clear line from beginning to cursor - if attr, err := ti.Parse("el1"); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[1K") - } - // Then clear line from cursor to end - if attr, err := ti.Parse("el"); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[K") - } -} - -func cursorUp(out io.Writer, ti termInfo, l int) { - if l == 0 { // Should never be the case, but be tolerant - return - } - if attr, err := ti.Parse("cuu", l); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[%dA", l) - } +func cursorUp(out io.Writer, l uint) { + fmt.Fprint(out, aec.Up(l)) } -func cursorDown(out io.Writer, ti termInfo, l int) { - if l == 0 { // Should never be the case, but be tolerant - return - } - if attr, err := ti.Parse("cud", l); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[%dB", l) - } +func cursorDown(out io.Writer, l uint) { + fmt.Fprint(out, aec.Down(l)) } -// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out` -// is a terminal. If this is the case, it will erase the entire current line -// when displaying the progressbar. -func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { +// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the +// entire current line when displaying the progressbar. 
+func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { if jm.Error != nil { if jm.Error.Code == 401 { return fmt.Errorf("authentication is required") @@ -212,8 +174,8 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { return jm.Error } var endl string - if termInfo != nil && jm.Stream == "" && jm.Progress != nil { - clearLine(out, termInfo) + if isTerminal && jm.Stream == "" && jm.Progress != nil { + clearLine(out) endl = "\r" fmt.Fprintf(out, endl) } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal @@ -230,7 +192,7 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { if jm.From != "" { fmt.Fprintf(out, "(from %s) ", jm.From) } - if jm.Progress != nil && termInfo != nil { + if jm.Progress != nil && isTerminal { fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) } else if jm.ProgressMessage != "" { //deprecated fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) @@ -248,25 +210,11 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { var ( dec = json.NewDecoder(in) - ids = make(map[string]int) + ids = make(map[string]uint) ) - var termInfo termInfo - - if isTerminal { - term := os.Getenv("TERM") - if term == "" { - term = "vt102" - } - - var err error - if termInfo, err = gotty.OpenTermInfo(term); err != nil { - termInfo = &noTermInfo{} - } - } - for { - diff := 0 + var diff uint var jm JSONMessage if err := dec.Decode(&jm); err != nil { if err == io.EOF { @@ -294,15 +242,15 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, // when we output something that's not // accounted for in the map, such as a line // with no ID. - line = len(ids) + line = uint(len(ids)) ids[jm.ID] = line - if termInfo != nil { + if isTerminal { fmt.Fprintf(out, "\n") } } - diff = len(ids) - line - if termInfo != nil { - cursorUp(out, termInfo, diff) + diff = uint(len(ids)) - line + if isTerminal { + cursorUp(out, diff) } } else { // When outputting something that isn't progress @@ -310,11 +258,11 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, // don't want progress entries from some previous // operation to be updated (for example, pull -a // with multiple tags). 
- ids = make(map[string]int) + ids = make(map[string]uint) } - err := jm.Display(out, termInfo) - if jm.ID != "" && termInfo != nil { - cursorDown(out, termInfo, diff) + err := jm.Display(out, isTerminal) + if jm.ID != "" && isTerminal { + cursorDown(out, diff) } if err != nil { return err diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go index 272363b68..ffd473311 100644 --- a/vendor/github.com/docker/docker/pkg/mount/flags.go +++ b/vendor/github.com/docker/docker/pkg/mount/flags.go @@ -135,15 +135,3 @@ func parseOptions(options string) (int, string) { } return flag, strings.Join(data, ",") } - -// ParseTmpfsOptions parse fstab type mount options into flags and data -func ParseTmpfsOptions(options string) (int, string, error) { - flags, data := parseOptions(options) - for _, o := range strings.Split(data, ",") { - opt := strings.SplitN(o, "=", 2) - if !validFlags[opt[0]] { - return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) - } - } - return flags, data, nil -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go index 874aff654..4afd63c42 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mount.go +++ b/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -2,12 +2,46 @@ package mount // import "github.com/docker/docker/pkg/mount" import ( "sort" + "strconv" "strings" - "syscall" "github.com/sirupsen/logrus" ) +// mountError records an error from mount or unmount operation +type mountError struct { + op string + source, target string + flags uintptr + data string + err error +} + +func (e *mountError) Error() string { + out := e.op + " " + + if e.source != "" { + out += e.source + ":" + e.target + } else { + out += e.target + } + + if e.flags != uintptr(0) { + out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16) + } + if e.data != "" { + out += ", data: " + e.data + } + + out += ": " + e.err.Error() + return out +} + +// Cause returns the underlying cause of the error +func (e *mountError) Cause() error { + return e.err +} + // FilterFunc is a type defining a callback function // to filter out unwanted entries. It takes a pointer // to an Info struct (not fully populated, currently @@ -89,12 +123,7 @@ func ForceMount(device, target, mType, options string) error { // Unmount lazily unmounts a filesystem on supported platforms, otherwise // does a normal unmount. func Unmount(target string) error { - err := unmount(target, mntDetach) - if err == syscall.EINVAL { - // ignore "not mounted" error - err = nil - } - return err + return unmount(target, mntDetach) } // RecursiveUnmount unmounts the target and all mounts underneath, starting with @@ -114,25 +143,14 @@ func RecursiveUnmount(target string) error { logrus.Debugf("Trying to unmount %s", m.Mountpoint) err = unmount(m.Mountpoint, mntDetach) if err != nil { - // If the error is EINVAL either this whole package is wrong (invalid flags passed to unmount(2)) or this is - // not a mountpoint (which is ok in this case). - // Meanwhile calling `Mounted()` is very expensive. - // - // We've purposefully used `syscall.EINVAL` here instead of `unix.EINVAL` to avoid platform branching - // Since `EINVAL` is defined for both Windows and Linux in the `syscall` package (and other platforms), - // this is nicer than defining a custom value that we can refer to in each platform file. 
- if err == syscall.EINVAL { - continue - } - if i == len(mounts)-1 { + if i == len(mounts)-1 { // last mount if mounted, e := Mounted(m.Mountpoint); e != nil || mounted { return err } - continue + } else { + // This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem + logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint) } - // This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem - logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint) - continue } logrus.Debugf("Unmounted %s", m.Mountpoint) diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go index b6ab83a23..09ad36060 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go @@ -11,11 +11,9 @@ package mount // import "github.com/docker/docker/pkg/mount" import "C" import ( - "fmt" "strings" + "syscall" "unsafe" - - "golang.org/x/sys/unix" ) func allocateIOVecs(options []string) []C.struct_iovec { @@ -49,12 +47,13 @@ func mount(device, target, mType string, flag uintptr, data string) error { } if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { - reason := C.GoString(C.strerror(*C.__error())) - return fmt.Errorf("Failed to call nmount: %s", reason) + return &mountError{ + op: "mount", + source: device, + target: target, + flags: flag, + err: syscall.Errno(errno), + } } return nil } - -func unmount(target string, flag int) error { - return unix.Unmount(target, flag) -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go index 631daf10a..a0a1ad236 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go @@ -33,25 +33,41 @@ func mount(device, target, mType string, flags uintptr, data string) error { // Initial call applying all non-propagation flags for mount // or remount with changed data if err := unix.Mount(device, target, mType, oflags, data); err != nil { - return err + return &mountError{ + op: "mount", + source: device, + target: target, + flags: oflags, + data: data, + err: err, + } } } if flags&ptypes != 0 { // Change the propagation type. if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { - return err + return &mountError{ + op: "remount", + target: target, + flags: flags & pflags, + err: err, + } } } if oflags&broflags == broflags { // Remount the bind to apply read only. 
- return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "") + if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil { + return &mountError{ + op: "remount-ro", + target: target, + flags: oflags | unix.MS_REMOUNT, + err: err, + } + + } } return nil } - -func unmount(target string, flag int) error { - return unix.Unmount(target, flag) -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go index 1428dffa5..c3e5aec27 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go @@ -5,7 +5,3 @@ package mount // import "github.com/docker/docker/pkg/mount" func mount(device, target, mType string, flag uintptr, data string) error { panic("Not implemented") } - -func unmount(target string, flag int) error { - panic("Not implemented") -} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go index 538f6637a..8a100f0bc 100644 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -48,18 +48,22 @@ func MakeRUnbindable(mountPoint string) error { return ensureMountedAs(mountPoint, "runbindable") } -func ensureMountedAs(mountPoint, options string) error { - mounted, err := Mounted(mountPoint) +// MakeMount ensures that the file or directory given is a mount point, +// bind mounting it to itself it case it is not. +func MakeMount(mnt string) error { + mounted, err := Mounted(mnt) if err != nil { return err } - - if !mounted { - if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { - return err - } + if mounted { + return nil } - if _, err = Mounted(mountPoint); err != nil { + + return Mount(mnt, mnt, "none", "bind") +} + +func ensureMountedAs(mountPoint, options string) error { + if err := MakeMount(mountPoint); err != nil { return err } diff --git a/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go b/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go new file mode 100644 index 000000000..4be427685 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go @@ -0,0 +1,22 @@ +// +build !windows + +package mount // import "github.com/docker/docker/pkg/mount" + +import "golang.org/x/sys/unix" + +func unmount(target string, flags int) error { + err := unix.Unmount(target, flags) + if err == nil || err == unix.EINVAL { + // Ignore "not mounted" error here. Note the same error + // can be returned if flags are invalid, so this code + // assumes that the flags value is always correct. 
+ return nil + } + + return &mountError{ + op: "umount", + target: target, + flags: uintptr(flags), + err: err, + } +} diff --git a/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go new file mode 100644 index 000000000..a88ad3577 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go @@ -0,0 +1,7 @@ +// +build windows + +package mount // import "github.com/docker/docker/pkg/mount" + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go index caed97c96..4013bded1 100644 --- a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go +++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go @@ -1,3 +1,5 @@ +// +build !mips,!mipsle,!mips64,!mips64le + package signal // import "github.com/docker/docker/pkg/signal" import ( diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux_mipsx.go new file mode 100644 index 000000000..4c7989121 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux_mipsx.go @@ -0,0 +1,84 @@ +// +build linux +// +build mips mipsle mips64 mips64le + +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + sigrtmin = 34 + sigrtmax = 127 +) + +// SignalMap is a map of Linux signals. +var SignalMap = map[string]syscall.Signal{ + "ABRT": unix.SIGABRT, + "ALRM": unix.SIGALRM, + "BUS": unix.SIGBUS, + "CHLD": unix.SIGCHLD, + "CLD": unix.SIGCLD, + "CONT": unix.SIGCONT, + "FPE": unix.SIGFPE, + "HUP": unix.SIGHUP, + "ILL": unix.SIGILL, + "INT": unix.SIGINT, + "IO": unix.SIGIO, + "IOT": unix.SIGIOT, + "KILL": unix.SIGKILL, + "PIPE": unix.SIGPIPE, + "POLL": unix.SIGPOLL, + "PROF": unix.SIGPROF, + "PWR": unix.SIGPWR, + "QUIT": unix.SIGQUIT, + "SEGV": unix.SIGSEGV, + "SIGEMT": unix.SIGEMT, + "STOP": unix.SIGSTOP, + "SYS": unix.SIGSYS, + "TERM": unix.SIGTERM, + "TRAP": unix.SIGTRAP, + "TSTP": unix.SIGTSTP, + "TTIN": unix.SIGTTIN, + "TTOU": unix.SIGTTOU, + "URG": unix.SIGURG, + "USR1": unix.SIGUSR1, + "USR2": unix.SIGUSR2, + "VTALRM": unix.SIGVTALRM, + "WINCH": unix.SIGWINCH, + "XCPU": unix.SIGXCPU, + "XFSZ": unix.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go index 7477995f1..de5a1c0fb 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ 
-3,6 +3,7 @@ package system // import "github.com/docker/docker/pkg/system" import ( + "os" "syscall" ) @@ -13,7 +14,7 @@ import ( func Lstat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Lstat(path, s); err != nil { - return nil, err + return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} } return fromStatT(s) } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go index 3d7e2ebbe..86bb6dd55 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -3,6 +3,7 @@ package system // import "github.com/docker/docker/pkg/system" import ( + "os" "syscall" ) @@ -59,7 +60,7 @@ func (s StatT) IsDir() bool { func Stat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Stat(path, s); err != nil { - return nil, err + return nil, &os.PathError{Op: "Stat", Path: path, Err: err} } return fromStatT(s) } diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go index de5a526b6..6bb9258c9 100644 --- a/vendor/github.com/docker/docker/registry/config.go +++ b/vendor/github.com/docker/docker/registry/config.go @@ -19,16 +19,11 @@ type ServiceOptions struct { AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"` Mirrors []string `json:"registry-mirrors,omitempty"` InsecureRegistries []string `json:"insecure-registries,omitempty"` - - // V2Only controls access to legacy registries. If it is set to true via the - // command line flag the daemon will not attempt to contact v1 legacy registries - V2Only bool `json:"disable-legacy-registry,omitempty"` } // serviceConfig holds daemon configuration for the registry service. type serviceConfig struct { registrytypes.ServiceConfig - V2Only bool } var ( @@ -76,7 +71,6 @@ func newServiceConfig(options ServiceOptions) (*serviceConfig, error) { // Hack: Bypass setting the mirrors to IndexConfigs since they are going away // and Mirrors are only for the official registry anyways. 
}, - V2Only: options.V2Only, } if err := config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts); err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go index 7a84bbfb7..6727b7dc3 100644 --- a/vendor/github.com/docker/docker/registry/registry.go +++ b/vendor/github.com/docker/docker/registry/registry.go @@ -145,7 +145,7 @@ func trustedLocation(req *http.Request) bool { // addRequiredHeadersToRedirectedRequests adds the necessary redirection headers // for redirected requests func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { - if via != nil && via[0] != nil { + if len(via) != 0 && via[0] != nil { if trustedLocation(req) && trustedLocation(via[0]) { req.Header = via[0].Header return nil diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go index b441970ff..08f5c7a4e 100644 --- a/vendor/github.com/docker/docker/registry/service.go +++ b/vendor/github.com/docker/docker/registry/service.go @@ -309,20 +309,5 @@ func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEn } func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - endpoints, err = s.lookupV2Endpoints(hostname) - if err != nil { - return nil, err - } - - if s.config.V2Only { - return endpoints, nil - } - - legacyEndpoints, err := s.lookupV1Endpoints(hostname) - if err != nil { - return nil, err - } - endpoints = append(endpoints, legacyEndpoints...) - - return endpoints, nil + return s.lookupV2Endpoints(hostname) } diff --git a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go index 5cb251b13..23caf7f12 100644 --- a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go +++ b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go @@ -14,6 +14,11 @@ import ( "github.com/sirupsen/logrus" ) +const ( + // DefaultResolvConf points to the default file used for dns configuration on a linux machine + DefaultResolvConf = "/etc/resolv.conf" +) + var ( // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"} @@ -50,15 +55,7 @@ type File struct { // Get returns the contents of /etc/resolv.conf and its hash func Get() (*File, error) { - resolv, err := ioutil.ReadFile("/etc/resolv.conf") - if err != nil { - return nil, err - } - hash, err := ioutils.HashData(bytes.NewReader(resolv)) - if err != nil { - return nil, err - } - return &File{Content: resolv, Hash: hash}, nil + return GetSpecific(DefaultResolvConf) } // GetSpecific returns the contents of the user specified resolv.conf file and its hash diff --git a/vendor/github.com/docker/libnetwork/types/types.go b/vendor/github.com/docker/libnetwork/types/types.go index 164b18096..b102ba4c3 100644 --- a/vendor/github.com/docker/libnetwork/types/types.go +++ b/vendor/github.com/docker/libnetwork/types/types.go @@ -7,6 +7,8 @@ import ( "net" "strconv" "strings" + + "github.com/ishidawataru/sctp" ) // constants for the IP address type @@ -96,6 +98,8 @@ func (p PortBinding) HostAddr() (net.Addr, error) { return &net.UDPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil case TCP: return &net.TCPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil + case SCTP: + return &sctp.SCTPAddr{IP: []net.IP{p.HostIP}, Port: int(p.HostPort)}, nil default: return nil, 
ErrInvalidProtocolBinding(p.Proto.String()) } @@ -108,6 +112,8 @@ func (p PortBinding) ContainerAddr() (net.Addr, error) { return &net.UDPAddr{IP: p.IP, Port: int(p.Port)}, nil case TCP: return &net.TCPAddr{IP: p.IP, Port: int(p.Port)}, nil + case SCTP: + return &sctp.SCTPAddr{IP: []net.IP{p.IP}, Port: int(p.Port)}, nil default: return nil, ErrInvalidProtocolBinding(p.Proto.String()) } @@ -139,7 +145,12 @@ func (p *PortBinding) String() string { return ret } -// FromString reads the TransportPort structure from string +// FromString reads the PortBinding structure from string s. +// String s is a triple of "protocol/containerIP:port/hostIP:port" +// containerIP and hostIP can be in dotted decimal ("192.0.2.1") or IPv6 ("2001:db8::68") form. +// Zoned addresses ("169.254.0.23%eth0" or "fe80::1ff:fe23:4567:890a%eth0") are not supported. +// If string s is incorrectly formatted or the IP addresses or ports cannot be parsed, FromString +// returns an error. func (p *PortBinding) FromString(s string) error { ps := strings.Split(s, "/") if len(ps) != 3 { @@ -161,21 +172,19 @@ func (p *PortBinding) FromString(s string) error { } func parseIPPort(s string) (net.IP, uint16, error) { - pp := strings.Split(s, ":") - if len(pp) != 2 { - return nil, 0, BadRequestErrorf("invalid format: %s", s) + hoststr, portstr, err := net.SplitHostPort(s) + if err != nil { + return nil, 0, err } - var ip net.IP - if pp[0] != "" { - if ip = net.ParseIP(pp[0]); ip == nil { - return nil, 0, BadRequestErrorf("invalid ip: %s", pp[0]) - } + ip := net.ParseIP(hoststr) + if ip == nil { + return nil, 0, BadRequestErrorf("invalid ip: %s", hoststr) } - port, err := strconv.ParseUint(pp[1], 10, 16) + port, err := strconv.ParseUint(portstr, 10, 16) if err != nil { - return nil, 0, BadRequestErrorf("invalid port: %s", pp[1]) + return nil, 0, BadRequestErrorf("invalid port: %s", portstr) } return ip, uint16(port), nil @@ -233,6 +242,8 @@ const ( TCP = 6 // UDP is for the UDP ip protocol UDP = 17 + // SCTP is for the SCTP ip protocol + SCTP = 132 ) // Protocol represents an IP protocol number @@ -246,6 +257,8 @@ func (p Protocol) String() string { return "tcp" case UDP: return "udp" + case SCTP: + return "sctp" default: return fmt.Sprintf("%d", p) } @@ -260,6 +273,8 @@ func ParseProtocol(s string) Protocol { return UDP case "tcp": return TCP + case "sctp": + return SCTP default: return 0 } @@ -317,6 +332,8 @@ func CompareIPNet(a, b *net.IPNet) bool { } // GetMinimalIP returns the address in its shortest form +// If ip contains an IPv4-mapped IPv6 address, the 4-octet form of the IPv4 address will be returned. +// Otherwise ip is returned unchanged. 
func GetMinimalIP(ip net.IP) net.IP { if ip != nil && ip.To4() != nil { return ip.To4() diff --git a/vendor/github.com/godbus/dbus/.travis.yml b/vendor/github.com/godbus/dbus/.travis.yml new file mode 100644 index 000000000..2e1bbb78c --- /dev/null +++ b/vendor/github.com/godbus/dbus/.travis.yml @@ -0,0 +1,40 @@ +dist: precise +language: go +go_import_path: github.com/godbus/dbus +sudo: true + +go: + - 1.6.3 + - 1.7.3 + - tip + +env: + global: + matrix: + - TARGET=amd64 + - TARGET=arm64 + - TARGET=arm + - TARGET=386 + - TARGET=ppc64le + +matrix: + fast_finish: true + allow_failures: + - go: tip + exclude: + - go: tip + env: TARGET=arm + - go: tip + env: TARGET=arm64 + - go: tip + env: TARGET=386 + - go: tip + env: TARGET=ppc64le + +addons: + apt: + packages: + - dbus + - dbus-x11 + +before_install: diff --git a/vendor/github.com/godbus/dbus/CONTRIBUTING.md b/vendor/github.com/godbus/dbus/CONTRIBUTING.md new file mode 100644 index 000000000..c88f9b2bd --- /dev/null +++ b/vendor/github.com/godbus/dbus/CONTRIBUTING.md @@ -0,0 +1,50 @@ +# How to Contribute + +## Getting Started + +- Fork the repository on GitHub +- Read the [README](README.markdown) for build and test instructions +- Play with the project, submit bugs, submit patches! + +## Contribution Flow + +This is a rough outline of what a contributor's workflow looks like: + +- Create a topic branch from where you want to base your work (usually master). +- Make commits of logical units. +- Make sure your commit messages are in the proper format (see below). +- Push your changes to a topic branch in your fork of the repository. +- Make sure the tests pass, and add any new tests as appropriate. +- Submit a pull request to the original repository. + +Thanks for your contributions! + +### Format of the Commit Message + +We follow a rough convention for commit messages that is designed to answer two +questions: what changed and why. The subject line should feature the what and +the body of the commit should describe the why. + +``` +scripts: add the test-cluster command + +this uses tmux to setup a test cluster that you can easily kill and +start for debugging. + +Fixes #38 +``` + +The format can be described more formally as follows: + +``` +: + + + +