==> Audit <==
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|------|---------|------|---------|------------|----------|
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.23.2 | Thu, 10 Mar 2022 13:10:41 CST | Thu, 10 Mar 2022 13:10:42 CST |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.23.2 | Thu, 10 Mar 2022 13:11:07 CST | Thu, 10 Mar 2022 13:11:09 CST |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.23.2 | Thu, 10 Mar 2022 13:11:40 CST | Thu, 10 Mar 2022 13:11:41 CST |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.23.2 | Thu, 10 Mar 2022 13:51:19 CST | Thu, 10 Mar 2022 13:51:21 CST |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.23.2 | Thu, 10 Mar 2022 14:37:39 CST | Thu, 10 Mar 2022 14:37:41 CST |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.23.2 | Thu, 10 Mar 2022 14:40:08 CST | Thu, 10 Mar 2022 14:40:09 CST |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.23.2 | Thu, 10 Mar 2022 14:46:01 CST | Thu, 10 Mar 2022 14:46:02 CST |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.23.2 | Thu, 10 Mar 2022 14:46:21 CST | Thu, 10 Mar 2022 14:46:22 CST |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.23.2 | Thu, 10 Mar 2022 14:49:46 CST | Thu, 10 Mar 2022 14:49:47 CST |
| config | | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 13:52:36 CDT | Mon, 14 Mar 2022 13:52:36 CDT |
| config | view | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 13:52:47 CDT | Mon, 14 Mar 2022 13:52:47 CDT |
| config | | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 13:53:05 CDT | Mon, 14 Mar 2022 13:53:05 CDT |
| config | view | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 13:53:07 CDT | Mon, 14 Mar 2022 13:53:07 CDT |
| start | | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 13:53:18 CDT | Mon, 14 Mar 2022 13:53:47 CDT |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.23.2 | Mon, 14 Mar 2022 13:56:54 CDT | Mon, 14 Mar 2022 13:56:55 CDT |
| profile | minikube | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 14:06:15 CDT | Mon, 14 Mar 2022 14:06:16 CDT |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.23.2 | Mon, 14 Mar 2022 14:07:38 CDT | Mon, 14 Mar 2022 14:07:40 CDT |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.23.2 | Mon, 14 Mar 2022 14:08:24 CDT | Mon, 14 Mar 2022 14:08:25 CDT |
| profile | default | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 14:12:45 CDT | Mon, 14 Mar 2022 14:12:45 CDT |
| profile | list | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 14:12:55 CDT | Mon, 14 Mar 2022 14:12:56 CDT |
| service | list | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 14:13:25 CDT | Mon, 14 Mar 2022 14:13:28 CDT |
| ip | | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 14:14:24 CDT | Mon, 14 Mar 2022 14:14:25 CDT |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.23.2 | Mon, 14 Mar 2022 14:23:17 CDT | Mon, 14 Mar 2022 14:23:19 CDT |
| stop | | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 14:24:59 CDT | Mon, 14 Mar 2022 14:25:11 CDT |
| start | | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 14:25:14 CDT | Mon, 14 Mar 2022 14:25:38 CDT |
| docker-env | | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 14:27:37 CDT | Mon, 14 Mar 2022 14:27:38 CDT |
| docker-env | | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 14:28:57 CDT | Mon, 14 Mar 2022 14:28:58 CDT |
| -p | minikube docker-env | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 14:29:10 CDT | Mon, 14 Mar 2022 14:29:12 CDT |
| -p | minikube | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 14:32:21 CDT | Mon, 14 Mar 2022 14:32:21 CDT |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.23.2 | Mon, 14 Mar 2022 14:32:44 CDT | Mon, 14 Mar 2022 14:32:45 CDT |
| addons | list | minikube | alekim64 | v1.23.2 | Mon, 14 Mar 2022 14:47:40 CDT | Mon, 14 Mar 2022 14:47:41 CDT |
| stop | | minikube | alekim64 | v1.23.2 | Tue, 15 Mar 2022 10:47:46 CDT | Tue, 15 Mar 2022 10:47:58 CDT |
| update-check | | minikube | alekim64 | v1.23.2 | Tue, 15 Mar 2022 11:24:03 CDT | Tue, 15 Mar 2022 11:24:04 CDT |
| delete | | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 11:28:04 CDT | Tue, 15 Mar 2022 11:28:42 CDT |
| delete | | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 11:35:55 CDT | Tue, 15 Mar 2022 11:35:58 CDT |
| delete | | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 11:40:00 CDT | Tue, 15 Mar 2022 11:40:04 CDT |
| delete | | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 11:47:27 CDT | Tue, 15 Mar 2022 11:47:32 CDT |
| start | --driver=docker --kubernetes-version=v1.21.10 | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 11:49:18 CDT | Tue, 15 Mar 2022 11:50:11 CDT |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.25.2 | Tue, 15 Mar 2022 11:50:36 CDT | Tue, 15 Mar 2022 11:50:37 CDT |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.25.2 | Tue, 15 Mar 2022 11:51:32 CDT | Tue, 15 Mar 2022 11:51:33 CDT |
| service | my-service | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 11:56:33 CDT | Tue, 15 Mar 2022 11:57:34 CDT |
| service | list | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:01:34 CDT | Tue, 15 Mar 2022 12:01:35 CDT |
| service | list | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:01:54 CDT | Tue, 15 Mar 2022 12:01:55 CDT |
| service | my-service | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:02:11 CDT | Tue, 15 Mar 2022 12:06:46 CDT |
| service | list | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:06:52 CDT | Tue, 15 Mar 2022 12:06:53 CDT |
| tunnel | | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:01:46 CDT | Tue, 15 Mar 2022 12:06:55 CDT |
| service | --help | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:07:03 CDT | Tue, 15 Mar 2022 12:07:03 CDT |
| service | my-service --https=true --url | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:07:37 CDT | Tue, 15 Mar 2022 12:07:45 CDT |
| service | my-service --https=true | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:07:47 CDT | Tue, 15 Mar 2022 12:07:56 CDT |
| service | my-service | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:08:29 CDT | Tue, 15 Mar 2022 12:08:44 CDT |
| ssh | | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:14:16 CDT | Tue, 15 Mar 2022 12:14:31 CDT |
| service | my-service | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:14:55 CDT | Tue, 15 Mar 2022 12:15:11 CDT |
| service | list | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:19:27 CDT | Tue, 15 Mar 2022 12:19:28 CDT |
| service | my-service | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:15:12 CDT | Tue, 15 Mar 2022 12:22:23 CDT |
| stop | | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:22:31 CDT | Tue, 15 Mar 2022 12:22:45 CDT |
| delete | | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:23:14 CDT | Tue, 15 Mar 2022 12:23:20 CDT |
| start | --driver=docker --kubernetes-version=v1.21.10 | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:23:22 CDT | Tue, 15 Mar 2022 12:24:10 CDT |
| docker-env | --shell none -p minikube --user=skaffold | minikube | skaffold | v1.25.2 | Tue, 15 Mar 2022 12:27:41 CDT | Tue, 15 Mar 2022 12:27:42 CDT |
| ssh | | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:36:04 CDT | Tue, 15 Mar 2022 12:37:50 CDT |
| service | list | minikube | alekim64 | v1.25.2 | Tue, 15 Mar 2022 12:37:53 CDT | Tue, 15 Mar 2022 12:37:54 CDT |

==> Last Start <==
Log file created at: 2022/03/15 12:23:22
Running on machine: MBP-1430
Binary: Built with gc go1.17.6 for darwin/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0315 12:23:22.365953 13746 out.go:297] Setting OutFile to fd 1 ...
I0315 12:23:22.366143 13746 out.go:349] isatty.IsTerminal(1) = true
I0315 12:23:22.366146 13746 out.go:310] Setting ErrFile to fd 2...
I0315 12:23:22.366151 13746 out.go:349] isatty.IsTerminal(2) = true
I0315 12:23:22.366243 13746 root.go:315] Updating PATH: /Users/alekim64/.minikube/bin
I0315 12:23:22.366638 13746 out.go:304] Setting JSON to false
I0315 12:23:22.393391 13746 start.go:112] hostinfo: {"hostname":"MBP-1430.ad.avant.com","uptime":321787,"bootTime":1647043215,"procs":495,"os":"darwin","platform":"darwin","platformFamily":"Standalone Workstation","platformVersion":"10.15.7","kernelVersion":"19.6.0","kernelArch":"x86_64","virtualizationSystem":"","virtualizationRole":"","hostId":"b0093572-4c13-5dd2-ac8d-aad198ffc7fd"}
W0315 12:23:22.393502 13746 start.go:120] gopshost.Virtualization returned error: not implemented yet
I0315 12:23:22.418199 13746 out.go:176] 😄 minikube v1.25.2 on Darwin 10.15.7
I0315 12:23:22.418331 13746 notify.go:193] Checking for updates...
I0315 12:23:22.418522 13746 driver.go:344] Setting default libvirt URI to qemu:///system
I0315 12:23:22.562446 13746 docker.go:132] docker version: linux-20.10.13
I0315 12:23:22.562570 13746 cli_runner.go:133] Run: docker system info --format "{{json .}}"
I0315 12:23:22.797254 13746 info.go:263] docker info: {ID:5FGP:AECT:JRQZ:MTBU:3YJX:5AFT:N3EO:I7XL:6KXS:H3V5:LDZF:GTY6 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:60 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:45 OomKillDisable:false NGoroutines:48 SystemTime:2022-03-15 17:23:22.721486133 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:4 KernelVersion:5.10.104-linuxkit OperatingSystem:Docker Desktop OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:8346034176 GenericResources: DockerRootDir:/var/lib/docker HTTPProxy:http.docker.internal:3128 HTTPSProxy:http.docker.internal:3128 NoProxy:hubproxy.docker.internal Name:docker-desktop Labels:[] ExperimentalBuild:false ServerVersion:20.10.13 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:2a1d4dbdb2a1030dc5b01e96fb110a9d9f150ecc Expected:2a1d4dbdb2a1030dc5b01e96fb110a9d9f150ecc} RuncCommit:{ID:v1.0.3-0-gf46b6ba Expected:v1.0.3-0-gf46b6ba} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=seccomp,profile=default name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/local/lib/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.8.0] map[Name:compose Path:/usr/local/lib/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.3.3] map[Name:scan Path:/usr/local/lib/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.17.0]] Warnings:}}
I0315 12:23:22.821770 13746 out.go:176] ✨ Using the docker driver based on user configuration
I0315 12:23:22.821797 13746 start.go:281] selected driver: docker
I0315 12:23:22.821807 13746 start.go:798] validating driver "docker" against <nil>
I0315 12:23:22.821819 13746 start.go:809] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error: Reason: Fix: Doc: Version:}
I0315 12:23:22.822010 13746 cli_runner.go:133] Run: docker system info --format "{{json .}}"
I0315 12:23:23.060011 13746 info.go:263] docker info: {ID:5FGP:AECT:JRQZ:MTBU:3YJX:5AFT:N3EO:I7XL:6KXS:H3V5:LDZF:GTY6 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:60 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:45 OomKillDisable:false NGoroutines:48 SystemTime:2022-03-15 17:23:22.98517515 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:4 KernelVersion:5.10.104-linuxkit OperatingSystem:Docker Desktop OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:8346034176 GenericResources: DockerRootDir:/var/lib/docker HTTPProxy:http.docker.internal:3128 HTTPSProxy:http.docker.internal:3128 NoProxy:hubproxy.docker.internal Name:docker-desktop Labels:[] ExperimentalBuild:false ServerVersion:20.10.13 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:2a1d4dbdb2a1030dc5b01e96fb110a9d9f150ecc Expected:2a1d4dbdb2a1030dc5b01e96fb110a9d9f150ecc} RuncCommit:{ID:v1.0.3-0-gf46b6ba Expected:v1.0.3-0-gf46b6ba} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=seccomp,profile=default name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/local/lib/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.8.0] map[Name:compose Path:/usr/local/lib/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.3.3] map[Name:scan Path:/usr/local/lib/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.17.0]] Warnings:}}
I0315 12:23:23.060145 13746 start_flags.go:288] no existing cluster config was found, will generate one from the flags
I0315 12:23:23.060352 13746 start_flags.go:369] Using suggested 7911MB memory alloc based on sys=32768MB, container=7959MB
I0315 12:23:23.060433 13746 start_flags.go:397] setting extra-config: kubelet.housekeeping-interval=5m
I0315 12:23:23.060446 13746 start_flags.go:813] Wait components to verify : map[apiserver:true system_pods:true]
I0315 12:23:23.060459 13746 cni.go:93] Creating CNI manager for ""
I0315 12:23:23.060466 13746 cni.go:167] CNI unnecessary in this configuration, recommending no CNI
I0315 12:23:23.060472 13746 start_flags.go:302] config: {Name:minikube KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2 Memory:7911 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.21.10 ClusterName:minikube Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[{Component:kubelet Key:housekeeping-interval Value:5m}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/Users:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false}
I0315 12:23:23.103998 13746 out.go:176] 👍 Starting control plane node minikube in cluster minikube
I0315 12:23:23.104036 13746 cache.go:120] Beginning downloading kic base image for docker with docker
I0315 12:23:23.127947 13746 out.go:176] 🚜 Pulling base image ...
I0315 12:23:23.128044 13746 preload.go:132] Checking if preload exists for k8s version v1.21.10 and runtime docker
I0315 12:23:23.128155 13746 image.go:75] Checking for gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2 in local docker daemon
I0315 12:23:23.128187 13746 preload.go:148] Found local preload: /Users/alekim64/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v17-v1.21.10-docker-overlay2-amd64.tar.lz4
I0315 12:23:23.128205 13746 cache.go:57] Caching tarball of preloaded images
I0315 12:23:23.128654 13746 preload.go:174] Found /Users/alekim64/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v17-v1.21.10-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0315 12:23:23.128689 13746 cache.go:60] Finished verifying existence of preloaded tar for v1.21.10 on docker
I0315 12:23:23.131067 13746 profile.go:148] Saving config to /Users/alekim64/.minikube/profiles/minikube/config.json ...
I0315 12:23:23.131119 13746 lock.go:35] WriteFile acquiring /Users/alekim64/.minikube/profiles/minikube/config.json: {Name:mk88c64e35681e240b79c645dd006794769fdb99 Clock:{} Delay:500ms Timeout:1m0s Cancel:}
I0315 12:23:23.315056 13746 image.go:79] Found gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2 in local docker daemon, skipping pull
I0315 12:23:23.315064 13746 cache.go:142] gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2 exists in daemon, skipping load
I0315 12:23:23.315074 13746 cache.go:208] Successfully downloaded all kic artifacts
I0315 12:23:23.315106 13746 start.go:313] acquiring machines lock for minikube: {Name:mka58e0f0a6a35194571c1a2a1e2a3dd2cf69966 Clock:{} Delay:500ms Timeout:10m0s Cancel:}
I0315 12:23:23.315198 13746 start.go:317] acquired machines lock for "minikube" in 83.247µs
I0315 12:23:23.315221 13746 start.go:89] Provisioning new machine with config: &{Name:minikube KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2 Memory:7911 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.21.10 ClusterName:minikube Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[{Component:kubelet Key:housekeeping-interval Value:5m}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.21.10 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/Users:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false} &{Name: IP: Port:8443 KubernetesVersion:v1.21.10 ContainerRuntime:docker ControlPlane:true Worker:true}
I0315 12:23:23.315288 13746 start.go:126] createHost starting for "" (driver="docker")
I0315 12:23:23.339708 13746 out.go:203] 🔥 Creating docker container (CPUs=2, Memory=7911MB) ...
I0315 12:23:23.339921 13746 start.go:160] libmachine.API.Create for "minikube" (driver="docker")
I0315 12:23:23.339957 13746 client.go:168] LocalClient.Create starting
I0315 12:23:23.340076 13746 main.go:130] libmachine: Reading certificate data from /Users/alekim64/.minikube/certs/ca.pem
I0315 12:23:23.340138 13746 main.go:130] libmachine: Decoding PEM data...
I0315 12:23:23.340157 13746 main.go:130] libmachine: Parsing certificate...
I0315 12:23:23.340217 13746 main.go:130] libmachine: Reading certificate data from /Users/alekim64/.minikube/certs/cert.pem
I0315 12:23:23.340258 13746 main.go:130] libmachine: Decoding PEM data...
I0315 12:23:23.340266 13746 main.go:130] libmachine: Parsing certificate...
I0315 12:23:23.341014 13746 cli_runner.go:133] Run: docker network inspect minikube --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0315 12:23:23.504846 13746 cli_runner.go:180] docker network inspect minikube --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0315 12:23:23.504982 13746 network_create.go:254] running [docker network inspect minikube] to gather additional debugging logs...
I0315 12:23:23.505006 13746 cli_runner.go:133] Run: docker network inspect minikube
W0315 12:23:23.668009 13746 cli_runner.go:180] docker network inspect minikube returned with exit code 1
I0315 12:23:23.668033 13746 network_create.go:257] error running [docker network inspect minikube]: docker network inspect minikube: exit status 1
stdout:
[]

stderr:
Error: No such network: minikube
I0315 12:23:23.668043 13746 network_create.go:259] output of [docker network inspect minikube]:
-- stdout --
[]

-- /stdout --
** stderr **
Error: No such network: minikube

** /stderr **
I0315 12:23:23.668179 13746 cli_runner.go:133] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0315 12:23:23.835859 13746 network.go:288] reserving subnet 192.168.49.0 for 1m0s: &{mu:{state:0 sema:0} read:{v:{m:map[] amended:true}} dirty:map[192.168.49.0:0xc00077cbb8] misses:0}
I0315 12:23:23.835895 13746 network.go:235] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:}}
I0315 12:23:23.835911 13746 network_create.go:106] attempt to create docker network minikube 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0315 12:23:23.836007 13746 cli_runner.go:133] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true minikube
I0315 12:23:24.034897 13746 network_create.go:90] docker network minikube 192.168.49.0/24 created
I0315 12:23:24.034922 13746 kic.go:106] calculated static IP "192.168.49.2" for the "minikube" container
I0315 12:23:24.035076 13746 cli_runner.go:133] Run: docker ps -a --format {{.Names}}
I0315 12:23:24.200283 13746 cli_runner.go:133] Run: docker volume create minikube --label name.minikube.sigs.k8s.io=minikube --label created_by.minikube.sigs.k8s.io=true
I0315 12:23:24.364203 13746 oci.go:102] Successfully created a docker volume minikube
I0315 12:23:24.364344 13746 cli_runner.go:133] Run: docker run --rm --name minikube-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=minikube --entrypoint /usr/bin/test -v minikube:/var gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2 -d /var/lib
I0315 12:23:25.004670 13746 oci.go:106] Successfully prepared a docker volume minikube
I0315 12:23:25.004711 13746 preload.go:132] Checking if preload exists for k8s version v1.21.10 and runtime docker
I0315 12:23:25.004725 13746 kic.go:179] Starting extracting preloaded images to volume ...
I0315 12:23:25.004852 13746 cli_runner.go:133] Run: docker run --rm --entrypoint /usr/bin/tar -v /Users/alekim64/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v17-v1.21.10-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v minikube:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2 -I lz4 -xf /preloaded.tar -C /extractDir
I0315 12:23:35.367732 13746 cli_runner.go:186] Completed: docker run --rm --entrypoint /usr/bin/tar -v /Users/alekim64/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v17-v1.21.10-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v minikube:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2 -I lz4 -xf /preloaded.tar -C /extractDir: (10.362644108s)
I0315 12:23:35.367756 13746 kic.go:188] duration metric: took 10.362850 seconds to extract preloaded images to volume
I0315 12:23:35.367899 13746 cli_runner.go:133] Run: docker info --format "'{{json .SecurityOptions}}'"
I0315 12:23:35.668290 13746 cli_runner.go:133] Run: docker run -d -t --privileged --device /dev/fuse --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname minikube --name minikube --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=minikube --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=minikube --network minikube --ip 192.168.49.2 --volume minikube:/var --security-opt apparmor=unconfined --memory=7911mb --memory-swap=7911mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2
I0315 12:23:36.178376 13746 cli_runner.go:133] Run: docker container inspect minikube --format={{.State.Running}}
I0315 12:23:36.358585 13746 cli_runner.go:133] Run: docker container inspect minikube --format={{.State.Status}}
I0315 12:23:36.545134 13746 cli_runner.go:133] Run: docker exec minikube stat /var/lib/dpkg/alternatives/iptables
I0315 12:23:36.785646 13746 oci.go:281] the created container "minikube" has a running status.
I0315 12:23:36.785669 13746 kic.go:210] Creating ssh key for kic: /Users/alekim64/.minikube/machines/minikube/id_rsa...
I0315 12:23:37.020880 13746 kic_runner.go:191] docker (temp): /Users/alekim64/.minikube/machines/minikube/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0315 12:23:37.241810 13746 cli_runner.go:133] Run: docker container inspect minikube --format={{.State.Status}}
I0315 12:23:37.405611 13746 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0315 12:23:37.405623 13746 kic_runner.go:114] Args: [docker exec --privileged minikube chown docker:docker /home/docker/.ssh/authorized_keys]
I0315 12:23:37.642191 13746 cli_runner.go:133] Run: docker container inspect minikube --format={{.State.Status}}
I0315 12:23:37.804112 13746 machine.go:88] provisioning docker machine ...
I0315 12:23:37.804270 13746 ubuntu.go:169] provisioning hostname "minikube"
I0315 12:23:37.804365 13746 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0315 12:23:37.980476 13746 main.go:130] libmachine: Using SSH client type: native
I0315 12:23:37.980726 13746 main.go:130] libmachine: &{{{<nil> 0 [] [] []} docker [0x43a2480] 0x43a5560 [] 0s} 127.0.0.1 65194 <nil> <nil>}
I0315 12:23:37.980737 13746 main.go:130] libmachine: About to run SSH command:
sudo hostname minikube && echo "minikube" | sudo tee /etc/hostname
I0315 12:23:38.113077 13746 main.go:130] libmachine: SSH cmd err, output: <nil>: minikube
I0315 12:23:38.113181 13746 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0315 12:23:38.274649 13746 main.go:130] libmachine: Using SSH client type: native
I0315 12:23:38.274828 13746 main.go:130] libmachine: &{{{<nil> 0 [] [] []} docker [0x43a2480] 0x43a5560 [] 0s} 127.0.0.1 65194 <nil> <nil>}
I0315 12:23:38.274839 13746 main.go:130] libmachine: About to run SSH command:

		if ! grep -xq '.*\sminikube' /etc/hosts; then
			if grep -xq '127.0.1.1\s.*' /etc/hosts; then
				sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 minikube/g' /etc/hosts;
			else
				echo '127.0.1.1 minikube' | sudo tee -a /etc/hosts;
			fi
		fi
I0315 12:23:38.396862 13746 main.go:130] libmachine: SSH cmd err, output: <nil>:
I0315 12:23:38.396888 13746 ubuntu.go:175] set auth options {CertDir:/Users/alekim64/.minikube CaCertPath:/Users/alekim64/.minikube/certs/ca.pem CaPrivateKeyPath:/Users/alekim64/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/Users/alekim64/.minikube/machines/server.pem ServerKeyPath:/Users/alekim64/.minikube/machines/server-key.pem ClientKeyPath:/Users/alekim64/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/Users/alekim64/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/Users/alekim64/.minikube}
I0315 12:23:38.396918 13746 ubuntu.go:177] setting up certificates
I0315 12:23:38.396925 13746 provision.go:83] configureAuth start
I0315 12:23:38.397027 13746 cli_runner.go:133] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" minikube
I0315 12:23:38.563229 13746 provision.go:138] copyHostCerts
I0315 12:23:38.563331 13746 exec_runner.go:144] found /Users/alekim64/.minikube/cert.pem, removing ...
I0315 12:23:38.563337 13746 exec_runner.go:207] rm: /Users/alekim64/.minikube/cert.pem
I0315 12:23:38.563444 13746 exec_runner.go:151] cp: /Users/alekim64/.minikube/certs/cert.pem --> /Users/alekim64/.minikube/cert.pem (1127 bytes)
I0315 12:23:38.563668 13746 exec_runner.go:144] found /Users/alekim64/.minikube/key.pem, removing ...
I0315 12:23:38.563671 13746 exec_runner.go:207] rm: /Users/alekim64/.minikube/key.pem
I0315 12:23:38.563780 13746 exec_runner.go:151] cp: /Users/alekim64/.minikube/certs/key.pem --> /Users/alekim64/.minikube/key.pem (1679 bytes)
I0315 12:23:38.563977 13746 exec_runner.go:144] found /Users/alekim64/.minikube/ca.pem, removing ...
I0315 12:23:38.563982 13746 exec_runner.go:207] rm: /Users/alekim64/.minikube/ca.pem
I0315 12:23:38.564061 13746 exec_runner.go:151] cp: /Users/alekim64/.minikube/certs/ca.pem --> /Users/alekim64/.minikube/ca.pem (1082 bytes)
I0315 12:23:38.564220 13746 provision.go:112] generating server cert: /Users/alekim64/.minikube/machines/server.pem ca-key=/Users/alekim64/.minikube/certs/ca.pem private-key=/Users/alekim64/.minikube/certs/ca-key.pem org=alekim64.minikube san=[192.168.49.2 127.0.0.1 localhost 127.0.0.1 minikube minikube]
I0315 12:23:38.766307 13746 provision.go:172] copyRemoteCerts
I0315 12:23:38.766406 13746 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0315 12:23:38.766490 13746 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0315 12:23:38.928233 13746 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:65194 SSHKeyPath:/Users/alekim64/.minikube/machines/minikube/id_rsa Username:docker}
I0315 12:23:39.017536 13746 ssh_runner.go:362] scp /Users/alekim64/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0315 12:23:39.036507 13746 ssh_runner.go:362] scp /Users/alekim64/.minikube/machines/server.pem --> /etc/docker/server.pem (1204 bytes)
I0315 12:23:39.056681 13746 ssh_runner.go:362] scp /Users/alekim64/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0315 12:23:39.075306 13746 provision.go:86] duration metric: configureAuth took 678.357079ms
I0315 12:23:39.075316 13746 ubuntu.go:193] setting minikube options for container-runtime
I0315 12:23:39.075509 13746 config.go:176] Loaded profile config "minikube": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.21.10
I0315 12:23:39.075592 13746 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0315 12:23:39.236371 13746 main.go:130] libmachine: Using SSH client type: native
I0315 12:23:39.236557 13746 main.go:130] libmachine: &{{{<nil> 0 [] [] []} docker [0x43a2480] 0x43a5560 [] 0s} 127.0.0.1 65194 <nil> <nil>}
I0315 12:23:39.236570 13746 main.go:130] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0315 12:23:39.360705 13746 main.go:130] libmachine: SSH cmd err, output: <nil>: overlay
I0315 12:23:39.360716 13746 ubuntu.go:71] root file system type: overlay
I0315 12:23:39.360902 13746 provision.go:309] Updating docker unit: /lib/systemd/system/docker.service ...
I0315 12:23:39.361009 13746 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0315 12:23:39.523919 13746 main.go:130] libmachine: Using SSH client type: native
I0315 12:23:39.524092 13746 main.go:130] libmachine: &{{{<nil> 0 [] [] []} docker [0x43a2480] 0x43a5560 [] 0s} 127.0.0.1 65194 <nil> <nil>}
I0315 12:23:39.524149 13746 main.go:130] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %!s(MISSING) "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60

[Service]
Type=notify
Restart=on-failure

# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.

# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0

# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes

# kill only the docker process, not all processes in the cgroup
KillMode=process

[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0315 12:23:39.657592 13746 main.go:130] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60

[Service]
Type=notify
Restart=on-failure

# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.

# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0

# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes

# kill only the docker process, not all processes in the cgroup
KillMode=process

[Install]
WantedBy=multi-user.target
I0315 12:23:39.657708 13746 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0315 12:23:39.819773 13746 main.go:130] libmachine: Using SSH client type: native
I0315 12:23:39.819953 13746 main.go:130] libmachine: &{{{<nil> 0 [] [] []} docker [0x43a2480] 0x43a5560 [] 0s} 127.0.0.1 65194 <nil> <nil>}
I0315 12:23:39.819964 13746 main.go:130] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0315 12:23:40.459102 13746 main.go:130] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service	2021-12-13 11:43:42.000000000 +0000
+++ /lib/systemd/system/docker.service.new	2022-03-15 17:23:39.659897190 +0000
@@ -1,30 +1,32 @@
 [Unit]
 Description=Docker Application Container Engine
 Documentation=https://docs.docker.com
+BindsTo=containerd.service
 After=network-online.target firewalld.service containerd.service
 Wants=network-online.target
-Requires=docker.socket containerd.service
+Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
 
 [Service]
 Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutSec=0
-RestartSec=2
-Restart=always
-
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
+Restart=on-failure
 
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
 
 # Having non-zero Limit*s causes performance problems due to accounting overhead
 # in the kernel. We recommend using cgroups to do container-local accounting.
@@ -32,16 +34,16 @@
 LimitNPROC=infinity
 LimitCORE=infinity
 
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
 TasksMax=infinity
+TimeoutStartSec=0
 
 # set delegate yes so that systemd does not reset the cgroups of docker containers
 Delegate=yes
 
 # kill only the docker process, not all processes in the cgroup
 KillMode=process
-OOMScoreAdjust=-500
 
 [Install]
 WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0315 12:23:40.459128 13746 machine.go:91] provisioned docker machine in 2.654955041s
I0315 12:23:40.459136 13746 client.go:171] LocalClient.Create took 17.118876251s
I0315 12:23:40.459155 13746 start.go:168] duration metric: libmachine.API.Create for "minikube" took 17.118934156s
I0315 12:23:40.459162 13746 start.go:267] post-start starting for "minikube" (driver="docker")
I0315 12:23:40.459165 13746 start.go:277] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0315 12:23:40.459328 13746 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0315 12:23:40.459423 13746 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0315 12:23:40.622500 13746 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:65194 SSHKeyPath:/Users/alekim64/.minikube/machines/minikube/id_rsa Username:docker}
I0315 12:23:40.712544 13746 ssh_runner.go:195] Run: cat /etc/os-release
I0315 12:23:40.716426 13746 main.go:130] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0315 12:23:40.716437 13746 main.go:130] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0315 12:23:40.716447 13746 main.go:130] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0315 12:23:40.716457 13746 info.go:137] Remote host: Ubuntu 20.04.2 LTS
I0315 12:23:40.716466 13746 filesync.go:126] Scanning /Users/alekim64/.minikube/addons for local assets ...
I0315 12:23:40.716588 13746 filesync.go:126] Scanning /Users/alekim64/.minikube/files for local assets ...
I0315 12:23:40.716644 13746 start.go:270] post-start completed in 257.474336ms
I0315 12:23:40.717289 13746 cli_runner.go:133] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" minikube
I0315 12:23:40.883523 13746 profile.go:148] Saving config to /Users/alekim64/.minikube/profiles/minikube/config.json ...
I0315 12:23:40.884060 13746 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0315 12:23:40.884134 13746 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0315 12:23:41.051531 13746 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:65194 SSHKeyPath:/Users/alekim64/.minikube/machines/minikube/id_rsa Username:docker}
I0315 12:23:41.137849 13746 start.go:129] duration metric: createHost completed in 17.822236617s
I0315 12:23:41.137858 13746 start.go:80] releasing machines lock for "minikube", held for 17.822342681s
I0315 12:23:41.137983 13746 cli_runner.go:133] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" minikube
I0315 12:23:41.303776 13746 ssh_runner.go:195] Run: curl -sS -m 2 https://k8s.gcr.io/
I0315 12:23:41.303791 13746 ssh_runner.go:195] Run: systemctl --version
I0315 12:23:41.303872 13746 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0315 12:23:41.303875 13746 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0315 12:23:41.479871 13746 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:65194 SSHKeyPath:/Users/alekim64/.minikube/machines/minikube/id_rsa Username:docker}
I0315 12:23:41.489974 13746 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:65194 SSHKeyPath:/Users/alekim64/.minikube/machines/minikube/id_rsa Username:docker}
I0315 12:23:41.730600 13746 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0315 12:23:41.741631 13746 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0315 12:23:41.752870 13746 cruntime.go:272] skipping containerd shutdown because we are bound to it
I0315 12:23:41.752965 13746 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0315 12:23:41.763632 13746 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///var/run/dockershim.sock
image-endpoint: unix:///var/run/dockershim.sock
" | sudo tee /etc/crictl.yaml"
I0315 12:23:41.778549 13746 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0315 12:23:41.852716 13746 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0315 12:23:41.924522 13746 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0315 12:23:41.935740 13746 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0315 12:23:42.019516 13746 ssh_runner.go:195] Run: sudo systemctl start docker
I0315 12:23:42.030571 13746 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0315 12:23:42.077323 13746 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0315 12:23:42.159473 13746 out.go:203] 🐳 Preparing Kubernetes v1.21.10 on Docker 20.10.12 ...
I0315 12:23:42.159593 13746 cli_runner.go:133] Run: docker exec -t minikube dig +short host.docker.internal
I0315 12:23:42.416015 13746 network.go:96] got host ip for mount in container by digging dns: 192.168.65.2
I0315 12:23:42.416160 13746 ssh_runner.go:195] Run: grep 192.168.65.2 host.minikube.internal$ /etc/hosts
I0315 12:23:42.421287 13746 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.65.2 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0315 12:23:42.433293 13746 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}'" minikube
I0315 12:23:42.630801 13746 out.go:176] ▪ kubelet.housekeeping-interval=5m
I0315 12:23:42.630893 13746 preload.go:132] Checking if preload exists for k8s version v1.21.10 and runtime docker
I0315 12:23:42.631005 13746 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0315 12:23:42.666796 13746 docker.go:606] Got preloaded images:
-- stdout --
k8s.gcr.io/kube-apiserver:v1.21.10
k8s.gcr.io/kube-proxy:v1.21.10
k8s.gcr.io/kube-scheduler:v1.21.10
k8s.gcr.io/kube-controller-manager:v1.21.10
kubernetesui/dashboard:v2.3.1
kubernetesui/metrics-scraper:v1.0.7
gcr.io/k8s-minikube/storage-provisioner:v5
k8s.gcr.io/pause:3.4.1
k8s.gcr.io/coredns/coredns:v1.8.0
k8s.gcr.io/etcd:3.4.13-0

-- /stdout --
I0315 12:23:42.666805 13746 docker.go:537] Images already preloaded, skipping extraction
I0315 12:23:42.666918 13746 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0315 12:23:42.701235 13746 docker.go:606] Got preloaded images:
-- stdout --
k8s.gcr.io/kube-apiserver:v1.21.10
k8s.gcr.io/kube-proxy:v1.21.10
k8s.gcr.io/kube-controller-manager:v1.21.10
k8s.gcr.io/kube-scheduler:v1.21.10
kubernetesui/dashboard:v2.3.1
kubernetesui/metrics-scraper:v1.0.7
gcr.io/k8s-minikube/storage-provisioner:v5
k8s.gcr.io/pause:3.4.1
k8s.gcr.io/coredns/coredns:v1.8.0
k8s.gcr.io/etcd:3.4.13-0

-- /stdout --
I0315 12:23:42.701249 13746 cache_images.go:84] Images are preloaded, skipping loading
I0315 12:23:42.701339 13746 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0315 12:23:42.788553 13746 cni.go:93] Creating CNI manager for ""
I0315 12:23:42.788563 13746 cni.go:167] CNI unnecessary in this configuration, recommending no CNI
I0315 12:23:42.788579 13746 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
I0315 12:23:42.788593 13746 kubeadm.go:158] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.21.10 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:minikube NodeName:minikube DNSDomain:cluster.local CRISocket:/var/run/dockershim.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NoTaintMaster:true NodeIP:192.168.49.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[]}
I0315 12:23:42.788698 13746 kubeadm.go:162] kubeadm config: apiVersion: kubeadm.k8s.io/v1beta2 kind: InitConfiguration localAPIEndpoint: advertiseAddress: 192.168.49.2 bindPort: 8443 bootstrapTokens: - groups: - system:bootstrappers:kubeadm:default-node-token ttl: 24h0m0s usages: - signing - authentication nodeRegistration: criSocket: /var/run/dockershim.sock name: "minikube" kubeletExtraArgs: node-ip: 192.168.49.2 taints: [] --- apiVersion: kubeadm.k8s.io/v1beta2 kind: ClusterConfiguration apiServer: certSANs: ["127.0.0.1", "localhost", "192.168.49.2"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" controllerManager: extraArgs: allocate-node-cidrs: "true" leader-elect: "false" scheduler: extraArgs: leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 dns: type: CoreDNS etcd: local: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" kubernetesVersion: v1.21.10 networking: dnsDomain: cluster.local podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%!"(MISSING) nodefs.inodesFree: "0%!"(MISSING) imagefs.available: "0%!"(MISSING) failSwapOn: false staticPodPath: /etc/kubernetes/manifests --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration clusterCIDR: "10.244.0.0/16" metricsBindAddress: 0.0.0.0:10249 conntrack: maxPerCore: 0 # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established" tcpEstablishedTimeout: 0s # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close" tcpCloseWaitTimeout: 0s I0315 12:23:42.788778 13746 kubeadm.go:936] kubelet [Unit] Wants=docker.socket [Service] ExecStart= ExecStart=/var/lib/minikube/binaries/v1.21.10/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime=docker --hostname-override=minikube --housekeeping-interval=5m --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2 [Install] config: {KubernetesVersion:v1.21.10 ClusterName:minikube Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[{Component:kubelet Key:housekeeping-interval Value:5m}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} I0315 12:23:42.788866 13746 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.21.10 I0315 12:23:42.797572 13746 binaries.go:44] Found k8s binaries, skipping transfer I0315 12:23:42.797655 13746 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube I0315 12:23:42.806253 13746 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (362 bytes) I0315 12:23:42.820237 13746 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (353 bytes) I0315 12:23:42.834483 13746 ssh_runner.go:362] scp memory --> 
I0315 12:23:42.848748 13746 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0315 12:23:42.852846 13746 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0315 12:23:42.864569 13746 certs.go:54] Setting up /Users/alekim64/.minikube/profiles/minikube for IP: 192.168.49.2
I0315 12:23:42.864943 13746 certs.go:182] skipping minikubeCA CA generation: /Users/alekim64/.minikube/ca.key
I0315 12:23:42.865047 13746 certs.go:182] skipping proxyClientCA CA generation: /Users/alekim64/.minikube/proxy-client-ca.key
I0315 12:23:42.865099 13746 certs.go:302] generating minikube-user signed cert: /Users/alekim64/.minikube/profiles/minikube/client.key
I0315 12:23:42.865119 13746 crypto.go:68] Generating cert /Users/alekim64/.minikube/profiles/minikube/client.crt with IP's: []
I0315 12:23:42.966158 13746 crypto.go:156] Writing cert to /Users/alekim64/.minikube/profiles/minikube/client.crt ...
I0315 12:23:42.966169 13746 lock.go:35] WriteFile acquiring /Users/alekim64/.minikube/profiles/minikube/client.crt: {Name:mk96d825e6cea7aacddbf08af6b01ea262294dc9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0315 12:23:42.966498 13746 crypto.go:164] Writing key to /Users/alekim64/.minikube/profiles/minikube/client.key ...
I0315 12:23:42.966504 13746 lock.go:35] WriteFile acquiring /Users/alekim64/.minikube/profiles/minikube/client.key: {Name:mk6a9a850aa847e54d331956dfde469d54c573de Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0315 12:23:42.966769 13746 certs.go:302] generating minikube signed cert: /Users/alekim64/.minikube/profiles/minikube/apiserver.key.dd3b5fb2
I0315 12:23:42.966802 13746 crypto.go:68] Generating cert /Users/alekim64/.minikube/profiles/minikube/apiserver.crt.dd3b5fb2 with IP's: [192.168.49.2 10.96.0.1 127.0.0.1 10.0.0.1]
I0315 12:23:43.276472 13746 crypto.go:156] Writing cert to /Users/alekim64/.minikube/profiles/minikube/apiserver.crt.dd3b5fb2 ...
I0315 12:23:43.276483 13746 lock.go:35] WriteFile acquiring /Users/alekim64/.minikube/profiles/minikube/apiserver.crt.dd3b5fb2: {Name:mk61fdc80edda655f8e9586c99e2b123e79d4fb7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0315 12:23:43.276799 13746 crypto.go:164] Writing key to /Users/alekim64/.minikube/profiles/minikube/apiserver.key.dd3b5fb2 ...
I0315 12:23:43.276806 13746 lock.go:35] WriteFile acquiring /Users/alekim64/.minikube/profiles/minikube/apiserver.key.dd3b5fb2: {Name:mk5d64f798d94b5703ca3b54c728f02e24ce0bdd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0315 12:23:43.277067 13746 certs.go:320] copying /Users/alekim64/.minikube/profiles/minikube/apiserver.crt.dd3b5fb2 -> /Users/alekim64/.minikube/profiles/minikube/apiserver.crt
I0315 12:23:43.277345 13746 certs.go:324] copying /Users/alekim64/.minikube/profiles/minikube/apiserver.key.dd3b5fb2 -> /Users/alekim64/.minikube/profiles/minikube/apiserver.key
I0315 12:23:43.277548 13746 certs.go:302] generating aggregator signed cert: /Users/alekim64/.minikube/profiles/minikube/proxy-client.key
I0315 12:23:43.277569 13746 crypto.go:68] Generating cert /Users/alekim64/.minikube/profiles/minikube/proxy-client.crt with IP's: []
I0315 12:23:43.317746 13746 crypto.go:156] Writing cert to /Users/alekim64/.minikube/profiles/minikube/proxy-client.crt ...
I0315 12:23:43.317756 13746 lock.go:35] WriteFile acquiring /Users/alekim64/.minikube/profiles/minikube/proxy-client.crt: {Name:mk052f7ab0f2685785db8e2611cde01b485878a4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0315 12:23:43.318115 13746 crypto.go:164] Writing key to /Users/alekim64/.minikube/profiles/minikube/proxy-client.key ...
I0315 12:23:43.318123 13746 lock.go:35] WriteFile acquiring /Users/alekim64/.minikube/profiles/minikube/proxy-client.key: {Name:mka961ab6fbb3c89065cf8df5bb4e7029cbbfb7e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0315 12:23:43.318729 13746 certs.go:388] found cert: /Users/alekim64/.minikube/certs/Users/alekim64/.minikube/certs/ca-key.pem (1679 bytes)
I0315 12:23:43.318789 13746 certs.go:388] found cert: /Users/alekim64/.minikube/certs/Users/alekim64/.minikube/certs/ca.pem (1082 bytes)
I0315 12:23:43.318836 13746 certs.go:388] found cert: /Users/alekim64/.minikube/certs/Users/alekim64/.minikube/certs/cert.pem (1127 bytes)
I0315 12:23:43.318942 13746 certs.go:388] found cert: /Users/alekim64/.minikube/certs/Users/alekim64/.minikube/certs/key.pem (1679 bytes)
I0315 12:23:43.319550 13746 ssh_runner.go:362] scp /Users/alekim64/.minikube/profiles/minikube/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes)
I0315 12:23:43.339855 13746 ssh_runner.go:362] scp /Users/alekim64/.minikube/profiles/minikube/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0315 12:23:43.360067 13746 ssh_runner.go:362] scp /Users/alekim64/.minikube/profiles/minikube/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0315 12:23:43.380072 13746 ssh_runner.go:362] scp /Users/alekim64/.minikube/profiles/minikube/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0315 12:23:43.401541 13746 ssh_runner.go:362] scp /Users/alekim64/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0315 12:23:43.421997 13746 ssh_runner.go:362] scp /Users/alekim64/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0315 12:23:43.442413 13746 ssh_runner.go:362] scp /Users/alekim64/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0315 12:23:43.463090 13746 ssh_runner.go:362] scp /Users/alekim64/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0315 12:23:43.483553 13746 ssh_runner.go:362] scp /Users/alekim64/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0315 12:23:43.509026 13746 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0315 12:23:43.527786 13746 ssh_runner.go:195] Run: openssl version
I0315 12:23:43.536905 13746 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0315 12:23:43.547434 13746 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0315 12:23:43.553643 13746 certs.go:431] hashing: -rw-r--r-- 1 root root 1111 Sep 9 2021 /usr/share/ca-certificates/minikubeCA.pem
I0315 12:23:43.553738 13746 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0315 12:23:43.561044 13746 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0315 12:23:43.571239 13746 kubeadm.go:391] StartCluster: {Name:minikube KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2 Memory:7911 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.21.10 ClusterName:minikube Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin: FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: ExtraOptions:[{Component:kubelet Key:housekeeping-interval Value:5m}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.21.10 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/Users:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false}
I0315 12:23:43.571392 13746 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0315 12:23:43.613204 13746 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0315 12:23:43.623085 13746 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0315 12:23:43.633386 13746 kubeadm.go:221] ignoring SystemVerification for kubeadm because of docker driver
I0315 12:23:43.633482 13746 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0315 12:23:43.643550 13746 kubeadm.go:152] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:

stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0315 12:23:43.643580 13746 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.21.10:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0315 12:23:44.528001 13746 out.go:203] ▪ Generating certificates and keys ...
I0315 12:23:47.314098 13746 out.go:203] ▪ Booting up control plane ...
I0315 12:24:01.858701 13746 out.go:203] ▪ Configuring RBAC rules ...
I0315 12:24:02.240845 13746 cni.go:93] Creating CNI manager for ""
I0315 12:24:02.240853 13746 cni.go:167] CNI unnecessary in this configuration, recommending no CNI
I0315 12:24:02.240881 13746 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0315 12:24:02.241022 13746 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.21.10/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0315 12:24:02.241053 13746 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.21.10/kubectl label nodes minikube.k8s.io/version=v1.25.2 minikube.k8s.io/commit=362d5fdc0a3dbee389b3d3f1034e8023e72bd3a7 minikube.k8s.io/name=minikube minikube.k8s.io/updated_at=2022_03_15T12_24_02_0700 minikube.k8s.io/primary=true --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
I0315 12:24:02.501469 13746 ops.go:34] apiserver oom_adj: -16
I0315 12:24:02.501536 13746 kubeadm.go:1020] duration metric: took 260.635253ms to wait for elevateKubeSystemPrivileges.
I0315 12:24:02.501552 13746 kubeadm.go:393] StartCluster complete in 18.92998817s
I0315 12:24:02.501570 13746 settings.go:142] acquiring lock: {Name:mk9c6afdd402710e13109918348df9d11a17392f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0315 12:24:02.501690 13746 settings.go:150] Updating kubeconfig: /Users/alekim64/.kube/config
I0315 12:24:02.503726 13746 lock.go:35] WriteFile acquiring /Users/alekim64/.kube/config: {Name:mk494c439a68c614ffb90bf92600f88fe11a5d45 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0315 12:24:03.020990 13746 kapi.go:244] deployment "coredns" in namespace "kube-system" and context "minikube" rescaled to 1
I0315 12:24:03.021019 13746 start.go:208] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.21.10 ContainerRuntime:docker ControlPlane:true Worker:true}
I0315 12:24:03.021035 13746 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.21.10/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0315 12:24:03.021046 13746 addons.go:415] enableAddons start: toEnable=map[], additional=[]
I0315 12:24:03.044670 13746 out.go:176] 🔎 Verifying Kubernetes components...
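The component verification that follows can be reproduced by hand once the kubeconfig update above has landed; a minimal check, assuming kubectl is on the PATH and the context is named "minikube":

$ kubectl --context minikube get pods -n kube-system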
I0315 12:24:03.021231 13746 config.go:176] Loaded profile config "minikube": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.21.10
I0315 12:24:03.044746 13746 addons.go:65] Setting default-storageclass=true in profile "minikube"
I0315 12:24:03.044747 13746 addons.go:65] Setting storage-provisioner=true in profile "minikube"
I0315 12:24:03.044759 13746 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "minikube"
I0315 12:24:03.044763 13746 addons.go:153] Setting addon storage-provisioner=true in "minikube"
W0315 12:24:03.044768 13746 addons.go:165] addon storage-provisioner should already be in state true
I0315 12:24:03.044795 13746 host.go:66] Checking if "minikube" exists ...
I0315 12:24:03.044848 13746 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0315 12:24:03.045526 13746 cli_runner.go:133] Run: docker container inspect minikube --format={{.State.Status}}
I0315 12:24:03.045562 13746 cli_runner.go:133] Run: docker container inspect minikube --format={{.State.Status}}
I0315 12:24:03.064627 13746 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "8443/tcp") 0).HostPort}}'" minikube
I0315 12:24:03.141040 13746 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.21.10/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.65.2 host.minikube.internal\n fallthrough\n }' | sudo /var/lib/minikube/binaries/v1.21.10/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0315 12:24:03.293301 13746 out.go:176] ▪ Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0315 12:24:03.284385 13746 addons.go:153] Setting addon default-storageclass=true in "minikube"
W0315 12:24:03.293331 13746 addons.go:165] addon default-storageclass should already be in state true
I0315 12:24:03.290002 13746 api_server.go:51] waiting for apiserver process to appear ...
I0315 12:24:03.293359 13746 host.go:66] Checking if "minikube" exists ...
I0315 12:24:03.293438 13746 addons.go:348] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0315 12:24:03.293443 13746 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0315 12:24:03.293482 13746 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0315 12:24:03.293568 13746 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0315 12:24:03.294873 13746 cli_runner.go:133] Run: docker container inspect minikube --format={{.State.Status}}
I0315 12:24:03.485514 13746 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:65194 SSHKeyPath:/Users/alekim64/.minikube/machines/minikube/id_rsa Username:docker}
I0315 12:24:03.500874 13746 addons.go:348] installing /etc/kubernetes/addons/storageclass.yaml
I0315 12:24:03.500884 13746 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0315 12:24:03.501008 13746 cli_runner.go:133] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0315 12:24:03.598815 13746 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.21.10/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0315 12:24:03.697674 13746 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:65194 SSHKeyPath:/Users/alekim64/.minikube/machines/minikube/id_rsa Username:docker}
I0315 12:24:03.794396 13746 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.21.10/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0315 12:24:04.196997 13746 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.21.10/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.65.2 host.minikube.internal\n fallthrough\n }' | sudo /var/lib/minikube/binaries/v1.21.10/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.055910819s)
I0315 12:24:04.197010 13746 start.go:777] {"host.minikube.internal": 192.168.65.2} host record injected into CoreDNS
I0315 12:24:04.197081 13746 api_server.go:71] duration metric: took 1.176020916s to wait for apiserver process to appear ...
I0315 12:24:04.197100 13746 api_server.go:87] waiting for apiserver healthz status ...
I0315 12:24:04.197112 13746 api_server.go:240] Checking apiserver healthz at https://127.0.0.1:65198/healthz ...
I0315 12:24:04.203749 13746 api_server.go:266] https://127.0.0.1:65198/healthz returned 200:
ok
I0315 12:24:04.205379 13746 api_server.go:140] control plane version: v1.21.10
I0315 12:24:04.205389 13746 api_server.go:130] duration metric: took 8.285662ms to wait for apiserver health ...
I0315 12:24:04.205398 13746 system_pods.go:43] waiting for kube-system pods to appear ...
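The healthz probe goes through the Docker driver's host port mapping (127.0.0.1:65198 -> 8443 in this run; the port is assigned per cluster start). An equivalent manual probe, assuming the same mapping, should print the "ok" body behind the 200 above:

$ curl -sk https://127.0.0.1:65198/healthz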
I0315 12:24:04.210569 13746 system_pods.go:59] 0 kube-system pods found
I0315 12:24:04.210590 13746 retry.go:31] will retry after 263.082536ms: only 0 pod(s) have shown up
I0315 12:24:04.280483 13746 out.go:176] 🌟 Enabled addons: storage-provisioner, default-storageclass
I0315 12:24:04.280516 13746 addons.go:417] enableAddons completed in 1.259458339s
I0315 12:24:04.478610 13746 system_pods.go:59] 1 kube-system pods found
I0315 12:24:04.478625 13746 system_pods.go:61] "storage-provisioner" [c86bb5b6-08dc-4ae7-9b0f-32279284627a] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.)
I0315 12:24:04.478632 13746 retry.go:31] will retry after 381.329545ms: only 1 pod(s) have shown up
I0315 12:24:04.864847 13746 system_pods.go:59] 1 kube-system pods found
I0315 12:24:04.864857 13746 system_pods.go:61] "storage-provisioner" [c86bb5b6-08dc-4ae7-9b0f-32279284627a] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.)
I0315 12:24:04.864864 13746 retry.go:31] will retry after 422.765636ms: only 1 pod(s) have shown up
I0315 12:24:05.294260 13746 system_pods.go:59] 1 kube-system pods found
I0315 12:24:05.294271 13746 system_pods.go:61] "storage-provisioner" [c86bb5b6-08dc-4ae7-9b0f-32279284627a] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.)
I0315 12:24:05.294278 13746 retry.go:31] will retry after 473.074753ms: only 1 pod(s) have shown up
I0315 12:24:05.771733 13746 system_pods.go:59] 1 kube-system pods found
I0315 12:24:05.771742 13746 system_pods.go:61] "storage-provisioner" [c86bb5b6-08dc-4ae7-9b0f-32279284627a] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.)
I0315 12:24:05.771749 13746 retry.go:31] will retry after 587.352751ms: only 1 pod(s) have shown up
I0315 12:24:06.362653 13746 system_pods.go:59] 1 kube-system pods found
I0315 12:24:06.362661 13746 system_pods.go:61] "storage-provisioner" [c86bb5b6-08dc-4ae7-9b0f-32279284627a] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.)
I0315 12:24:06.362671 13746 retry.go:31] will retry after 834.206799ms: only 1 pod(s) have shown up
I0315 12:24:07.200453 13746 system_pods.go:59] 1 kube-system pods found
I0315 12:24:07.200467 13746 system_pods.go:61] "storage-provisioner" [c86bb5b6-08dc-4ae7-9b0f-32279284627a] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.)
I0315 12:24:07.200473 13746 retry.go:31] will retry after 746.553905ms: only 1 pod(s) have shown up
I0315 12:24:07.952783 13746 system_pods.go:59] 1 kube-system pods found
I0315 12:24:07.953529 13746 system_pods.go:61] "storage-provisioner" [c86bb5b6-08dc-4ae7-9b0f-32279284627a] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.)
I0315 12:24:07.953538 13746 retry.go:31] will retry after 987.362415ms: only 1 pod(s) have shown up
I0315 12:24:08.949927 13746 system_pods.go:59] 1 kube-system pods found
I0315 12:24:08.949936 13746 system_pods.go:61] "storage-provisioner" [c86bb5b6-08dc-4ae7-9b0f-32279284627a] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.)
I0315 12:24:08.949942 13746 retry.go:31] will retry after 1.189835008s: only 1 pod(s) have shown up
I0315 12:24:10.148521 13746 system_pods.go:59] 5 kube-system pods found
I0315 12:24:10.148531 13746 system_pods.go:61] "etcd-minikube" [55c0c1ec-1a5a-480e-997e-815cb0182887] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0315 12:24:10.148535 13746 system_pods.go:61] "kube-apiserver-minikube" [f2b08832-86d6-4e63-82e4-c9579a8a1e42] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0315 12:24:10.148539 13746 system_pods.go:61] "kube-controller-manager-minikube" [47fe2768-c253-467c-81b6-0807116914d4] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0315 12:24:10.148541 13746 system_pods.go:61] "kube-scheduler-minikube" [a8b45840-b4cf-4680-afae-41777ca227c3] Pending
I0315 12:24:10.148543 13746 system_pods.go:61] "storage-provisioner" [c86bb5b6-08dc-4ae7-9b0f-32279284627a] Pending: PodScheduled:Unschedulable (0/1 nodes are available: 1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate.)
I0315 12:24:10.148547 13746 system_pods.go:74] duration metric: took 5.943041991s to wait for pod list to return data ...
I0315 12:24:10.148551 13746 kubeadm.go:548] duration metric: took 7.127395733s to wait for : map[apiserver:true system_pods:true] ...
I0315 12:24:10.148558 13746 node_conditions.go:102] verifying NodePressure condition ...
I0315 12:24:10.152629 13746 node_conditions.go:122] node storage ephemeral capacity is 131848196Ki
I0315 12:24:10.152639 13746 node_conditions.go:123] node cpu capacity is 8
I0315 12:24:10.152646 13746 node_conditions.go:105] duration metric: took 4.085466ms to run NodePressure ...
I0315 12:24:10.152651 13746 start.go:213] waiting for startup goroutines ...
I0315 12:24:10.196318 13746 start.go:496] kubectl: 1.23.4, cluster: 1.21.10 (minor skew: 2)
I0315 12:24:10.220035 13746 out.go:176]
W0315 12:24:10.220334 13746 out.go:241] ❗ /usr/local/bin/kubectl is version 1.23.4, which may have incompatibilities with Kubernetes 1.21.10.
I0315 12:24:10.243621 13746 out.go:176] ▪ Want kubectl v1.21.10? Try 'minikube kubectl -- get pods -A'
I0315 12:24:10.268619 13746 out.go:176] 🏄 Done! kubectl is now configured to use "minikube" cluster and "default" namespace by default

*
* ==> Docker <==
*
-- Logs begin at Tue 2022-03-15 17:23:36 UTC, end at Tue 2022-03-15 17:39:07 UTC. --
Mar 15 17:23:36 minikube systemd[1]: Starting Docker Application Container Engine...
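The dockerd entries that follow come from the systemd journal inside the kicbase container (note the "-- Logs begin at ... --" header). They can also be pulled on demand; a sketch, assuming the node is up:

$ minikube ssh -- sudo journalctl -u docker --no-pager | tail -n 50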
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.478473015Z" level=info msg="Starting up"
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.480261049Z" level=info msg="parsed scheme: \"unix\"" module=grpc
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.480294769Z" level=info msg="scheme \"unix\" not registered, fallback to default scheme" module=grpc
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.480330673Z" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock 0 }] }" module=grpc
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.480344665Z" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.481612166Z" level=info msg="parsed scheme: \"unix\"" module=grpc
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.481645433Z" level=info msg="scheme \"unix\" not registered, fallback to default scheme" module=grpc
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.481662094Z" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock 0 }] }" module=grpc
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.481681146Z" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.487246692Z" level=info msg="[graphdriver] using prior storage driver: overlay2"
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.504495465Z" level=info msg="Loading containers: start."
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.564145654Z" level=info msg="Default bridge (docker0) is assigned with an IP address 172.17.0.0/16. Daemon option --bip can be used to set a preferred IP address"
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.598386930Z" level=info msg="Loading containers: done."
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.611358457Z" level=info msg="Docker daemon" commit=459d0df graphdriver(s)=overlay2 version=20.10.12
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.611465463Z" level=info msg="Daemon has completed initialization"
Mar 15 17:23:36 minikube systemd[1]: Started Docker Application Container Engine.
Mar 15 17:23:36 minikube dockerd[130]: time="2022-03-15T17:23:36.642644444Z" level=info msg="API listen on /run/docker.sock"
Mar 15 17:23:40 minikube systemd[1]: docker.service: Current command vanished from the unit file, execution of the command list won't be resumed.
Mar 15 17:23:40 minikube systemd[1]: Stopping Docker Application Container Engine...
Mar 15 17:23:40 minikube dockerd[130]: time="2022-03-15T17:23:40.247378260Z" level=info msg="Processing signal 'terminated'"
Mar 15 17:23:40 minikube dockerd[130]: time="2022-03-15T17:23:40.248524298Z" level=info msg="stopping event stream following graceful shutdown" error="" module=libcontainerd namespace=moby
Mar 15 17:23:40 minikube dockerd[130]: time="2022-03-15T17:23:40.249255149Z" level=info msg="Daemon shutdown complete"
Mar 15 17:23:40 minikube dockerd[130]: time="2022-03-15T17:23:40.249349344Z" level=info msg="stopping event stream following graceful shutdown" error="context canceled" module=libcontainerd namespace=plugins.moby
Mar 15 17:23:40 minikube systemd[1]: docker.service: Succeeded.
Mar 15 17:23:40 minikube systemd[1]: Stopped Docker Application Container Engine.
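The stop/start pair above (note the "Current command vanished from the unit file" message) is evidently minikube re-provisioning dockerd under its own unit configuration; the restarted daemon below additionally listens on tcp port 2376, which the docker-env workflow uses. Its effective settings can be probed the same way minikube does, e.g.:

$ minikube ssh -- docker info --format '{{.CgroupDriver}}'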
Mar 15 17:23:40 minikube systemd[1]: Starting Docker Application Container Engine...
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.306218129Z" level=info msg="Starting up"
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.308132436Z" level=info msg="parsed scheme: \"unix\"" module=grpc
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.308164997Z" level=info msg="scheme \"unix\" not registered, fallback to default scheme" module=grpc
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.308195129Z" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock 0 }] }" module=grpc
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.308209895Z" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.309347297Z" level=info msg="parsed scheme: \"unix\"" module=grpc
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.309380131Z" level=info msg="scheme \"unix\" not registered, fallback to default scheme" module=grpc
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.309396997Z" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock 0 }] }" module=grpc
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.309405580Z" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.312848921Z" level=info msg="[graphdriver] using prior storage driver: overlay2"
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.317562285Z" level=info msg="Loading containers: start."
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.399767146Z" level=info msg="Default bridge (docker0) is assigned with an IP address 172.17.0.0/16. Daemon option --bip can be used to set a preferred IP address"
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.431792151Z" level=info msg="Loading containers: done."
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.443344415Z" level=info msg="Docker daemon" commit=459d0df graphdriver(s)=overlay2 version=20.10.12
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.443411418Z" level=info msg="Daemon has completed initialization"
Mar 15 17:23:40 minikube systemd[1]: Started Docker Application Container Engine.
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.470180177Z" level=info msg="API listen on [::]:2376"
Mar 15 17:23:40 minikube dockerd[385]: time="2022-03-15T17:23:40.473303100Z" level=info msg="API listen on /var/run/docker.sock"
Mar 15 17:27:43 minikube dockerd[385]: time="2022-03-15T17:27:43.905912538Z" level=info msg="parsed scheme: \"\"" module=grpc
Mar 15 17:27:43 minikube dockerd[385]: time="2022-03-15T17:27:43.905969243Z" level=info msg="scheme \"\" not registered, fallback to default scheme" module=grpc
Mar 15 17:27:43 minikube dockerd[385]: time="2022-03-15T17:27:43.906030897Z" level=info msg="ccResolverWrapper: sending update to cc: {[{localhost 0 }] }" module=grpc
Mar 15 17:27:43 minikube dockerd[385]: time="2022-03-15T17:27:43.906052516Z" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc
Mar 15 17:30:34 minikube dockerd[385]: time="2022-03-15T17:30:34.451275683Z" level=warning msg="grpc: addrConn.createTransport failed to connect to {localhost 0 }. Err :connection error: desc = \"transport: Error while dialing only one connection allowed\". Reconnecting..." module=grpc
Mar 15 17:30:34 minikube dockerd[385]: time="2022-03-15T17:30:34.870123195Z" level=info msg="parsed scheme: \"\"" module=grpc
Mar 15 17:30:34 minikube dockerd[385]: time="2022-03-15T17:30:34.870199682Z" level=info msg="scheme \"\" not registered, fallback to default scheme" module=grpc
Mar 15 17:30:34 minikube dockerd[385]: time="2022-03-15T17:30:34.870222449Z" level=info msg="ccResolverWrapper: sending update to cc: {[{localhost 0 }] }" module=grpc
Mar 15 17:30:34 minikube dockerd[385]: time="2022-03-15T17:30:34.870236324Z" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc
Mar 15 17:31:44 minikube dockerd[385]: time="2022-03-15T17:31:44.398532018Z" level=warning msg="grpc: addrConn.createTransport failed to connect to {localhost 0 }. Err :connection error: desc = \"transport: Error while dialing only one connection allowed\". Reconnecting..." module=grpc
Mar 15 17:31:44 minikube dockerd[385]: time="2022-03-15T17:31:44.963800854Z" level=info msg="Layer sha256:f83cec30d74d5e9f1c081f9674bd65388bb2ae693e37042dbd499c18e1907a36 cleaned up"

*
* ==> container status <==
*
CONTAINER       IMAGE           CREATED          STATE    NAME                      ATTEMPT  POD ID
e60362885a114   1c31904df36c9   7 minutes ago    Running  my-service                0        e9341b18e1d33
4e19ec8b326b3   7baa1221237a8   7 minutes ago    Running  postgresql                0        0515f6c21ed42
f88d62b00b23c   6e38f40d628db   14 minutes ago   Running  storage-provisioner       0        4f31f6f7c68dc
f11fa83ab9b03   296a6d5035e2d   14 minutes ago   Running  coredns                   0        fbe7bf6d48124
59b6e9d8d70ac   ab8993ba3211b   14 minutes ago   Running  kube-proxy                0        63b642abaf8dd
e60aafc8d983c   2f776f4731317   15 minutes ago   Running  kube-scheduler            0        fa2612686324b
fee69fc682571   704b64a9bcd2f   15 minutes ago   Running  kube-apiserver            0        f36cdedd7773b
f442d867ce23e   0369cf4303ffd   15 minutes ago   Running  etcd                      0        3d8c477899c27
765d0d317ead5   eeb3ff9374071   15 minutes ago   Running  kube-controller-manager   0        fc305e336019c

*
* ==> coredns [f11fa83ab9b0] <==
*
.:53
[INFO] plugin/reload: Running configuration MD5 = c23ed519c17e71ee396ed052e6209e94
CoreDNS-1.8.0
linux/amd64, go1.15.3, 054c9ae

*
* ==> describe nodes <==
*
Name:               minikube
Roles:              control-plane,master
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=minikube
                    kubernetes.io/os=linux
                    minikube.k8s.io/commit=362d5fdc0a3dbee389b3d3f1034e8023e72bd3a7
                    minikube.k8s.io/name=minikube
                    minikube.k8s.io/primary=true
                    minikube.k8s.io/updated_at=2022_03_15T12_24_02_0700
                    minikube.k8s.io/version=v1.25.2
                    node-role.kubernetes.io/control-plane=
                    node-role.kubernetes.io/master=
                    node.kubernetes.io/exclude-from-external-load-balancers=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Tue, 15 Mar 2022 17:23:59 +0000
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  minikube
  AcquireTime:     <unset>
  RenewTime:       Tue, 15 Mar 2022 17:39:05 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----             ------  -----------------                 ------------------                ------                       -------
  MemoryPressure   False   Tue, 15 Mar 2022 17:37:12 +0000   Tue, 15 Mar 2022 17:23:56 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure     False   Tue, 15 Mar 2022 17:37:12 +0000   Tue, 15 Mar 2022 17:23:56 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure      False   Tue, 15 Mar 2022 17:37:12 +0000   Tue, 15 Mar 2022 17:23:56 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready            True    Tue, 15 Mar 2022 17:37:12 +0000   Tue, 15 Mar 2022 17:24:14 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
  InternalIP:  192.168.49.2
  Hostname:    minikube
Capacity:
  cpu:                8
  ephemeral-storage:  131848196Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             8150424Ki
  pods:               110
Allocatable:
  cpu:                8
  ephemeral-storage:  131848196Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             8150424Ki
  pods:               110
System Info:
  Machine ID:                 b6a262faae404a5db719705fd34b5c8b
  System UUID:                20d405e3-412e-4842-bca5-5b743817dc4d
  Boot ID:                    c3729d6d-425c-4c70-ba4d-c420d3448f00
  Kernel Version:             5.10.104-linuxkit
  OS Image:                   Ubuntu 20.04.2 LTS
  Operating System:           linux
  Architecture:               amd64
  Container Runtime Version:  docker://20.10.12
  Kubelet Version:            v1.21.10
  Kube-Proxy Version:         v1.21.10
PodCIDR:                      10.244.0.0/24
PodCIDRs:                     10.244.0.0/24
Non-terminated Pods:          (9 in total)
  Namespace    Name                                CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------    ----                                ------------  ----------  ---------------  -------------  ---
  default      my-service-545bddb88-txzgl          100m (1%)     500m (6%)   512Mi (6%)       512Mi (6%)     7m11s
  default      postgres-postgresql-0               250m (3%)     0 (0%)      256Mi (3%)       0 (0%)         7m13s
  kube-system  coredns-558bd4d5db-g6xct            100m (1%)     0 (0%)      70Mi (0%)        170Mi (2%)     14m
  kube-system  etcd-minikube                       100m (1%)     0 (0%)      100Mi (1%)       0 (0%)         14m
  kube-system  kube-apiserver-minikube             250m (3%)     0 (0%)      0 (0%)           0 (0%)         14m
  kube-system  kube-controller-manager-minikube    200m (2%)     0 (0%)      0 (0%)           0 (0%)         14m
  kube-system  kube-proxy-n898j                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         14m
  kube-system  kube-scheduler-minikube             100m (1%)     0 (0%)      0 (0%)           0 (0%)         14m
  kube-system  storage-provisioner                 0 (0%)        0 (0%)      0 (0%)           0 (0%)         15m
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests     Limits
  --------           --------     ------
  cpu                1100m (13%)  500m (6%)
  memory             938Mi (11%)  682Mi (8%)
  ephemeral-storage  0 (0%)       0 (0%)
  hugepages-1Gi      0 (0%)       0 (0%)
  hugepages-2Mi      0 (0%)       0 (0%)
Events:
  Type    Reason                   Age  From        Message
  ----    ------                   ---- ----        -------
  Normal  Starting                 14m  kubelet     Starting kubelet.
  Normal  NodeHasSufficientMemory  14m  kubelet     Node minikube status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    14m  kubelet     Node minikube status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     14m  kubelet     Node minikube status is now: NodeHasSufficientPID
  Normal  NodeNotReady             14m  kubelet     Node minikube status is now: NodeNotReady
  Normal  NodeAllocatableEnforced  14m  kubelet     Updated Node Allocatable limit across pods
  Normal  NodeReady                14m  kubelet     Node minikube status is now: NodeReady
  Normal  Starting                 14m  kube-proxy  Starting kube-proxy.
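This node dump mirrors kubectl describe node; re-running it is the quickest way to re-check the capacity and request/limit figures above, e.g.:

$ kubectl --context minikube describe node minikube | grep -A 10 'Allocated resources'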
*
* ==> dmesg <==
*
[Mar15 16:48] ERROR: earlyprintk= earlyser already used
[  +0.000000] ERROR: earlyprintk= earlyser already used
[  +0.000000] ACPI BIOS Warning (bug): Incorrect checksum in table [DSDT] - 0x7E, should be 0xDB (20200925/tbprint-173)
[  +0.195041] #2
[  +0.063007] #3
[  +0.062997] #4
[  +0.062994] #5
[  +0.062007] #6
[  +0.064096] #7
[  +2.006915] Hangcheck: starting hangcheck timer 0.9.1 (tick is 180 seconds, margin is 60 seconds).
[  +0.010499] the cryptoloop driver has been deprecated and will be removed in in Linux 5.16
[  +0.016547] ACPI Error: Could not enable RealTimeClock event (20200925/evxfevnt-182)
[  +0.001438] ACPI Warning: Could not enable fixed event - RealTimeClock (4) (20200925/evxface-618)
[Mar15 16:49] grpcfuse: loading out-of-tree module taints kernel.

*
* ==> etcd [f442d867ce23] <==
*
2022-03-15 17:29:54.824383 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:30:04.802851 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:30:14.825501 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:30:24.803052 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:30:34.782233 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:30:44.781970 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:30:54.781936 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:31:04.760754 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:31:14.761310 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:31:24.760682 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:31:34.740355 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:31:44.740355 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:31:54.740460 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:32:04.718845 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:32:14.719859 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:32:24.720078 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:32:34.698187 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:32:44.698490 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:32:54.698878 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:33:04.676715 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:33:14.677714 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:33:24.677575 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:33:34.656830 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:33:44.656564 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:33:54.656515 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:33:56.140298 I | mvcc: store.index: compact 660
2022-03-15 17:33:56.140945 I | mvcc: finished scheduled compaction at 660 (took 462.615µs)
2022-03-15 17:34:04.635253 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:34:14.636269 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:34:24.635457 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:34:34.615355 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:34:44.614185 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:34:54.615041 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:35:04.593381 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:35:14.593782 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:35:24.593480 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:35:34.572279 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:35:44.572256 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:35:54.572266 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:36:04.551342 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:36:14.551374 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:36:24.552146 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:36:34.530535 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:36:44.530805 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:36:54.530611 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:37:04.510110 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:37:14.509694 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:37:24.509718 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:37:34.488655 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:37:44.488734 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:37:54.488936 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:38:04.467836 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:38:14.467727 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:38:24.467627 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:38:34.446588 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:38:44.445529 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:38:54.446829 I | etcdserver/api/etcdhttp: /health OK (status code 200)
2022-03-15 17:38:55.935060 I | mvcc: store.index: compact 960
2022-03-15 17:38:55.936384 I | mvcc: finished scheduled compaction at 960 (took 846.699µs)
2022-03-15 17:39:04.425769 I | etcdserver/api/etcdhttp: /health OK (status code 200)

*
* ==> kernel <==
*
17:39:08 up 50 min, 0 users, load average: 0.17, 0.21, 0.22
Linux minikube 5.10.104-linuxkit #1 SMP Wed Mar 9 19:05:23 UTC 2022 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 20.04.2 LTS"

*
* ==> kube-apiserver [fee69fc68257] <==
*
I0315 17:27:24.543278 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:27:24.543303 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:28:06.709620 1 client.go:360] parsed scheme: "passthrough"
I0315 17:28:06.709675 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:28:06.709683 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:28:41.528430 1 client.go:360] parsed scheme: "passthrough"
I0315 17:28:41.528476 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:28:41.528484 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:29:21.701708 1 client.go:360] parsed scheme: "passthrough"
I0315 17:29:21.701753 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:29:21.701766 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:29:52.290607 1 client.go:360] parsed scheme: "passthrough"
I0315 17:29:52.290655 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:29:52.290663 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:30:36.477457 1 client.go:360] parsed scheme: "passthrough"
I0315 17:30:36.477488 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:30:36.477495 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:31:14.391748 1 client.go:360] parsed scheme: "passthrough"
I0315 17:31:14.391787 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:31:14.391794 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:31:54.005539 1 controller.go:611] quota admission added evaluator for: statefulsets.apps
I0315 17:31:55.354774 1 client.go:360] parsed scheme: "passthrough"
I0315 17:31:55.354821 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:31:55.354829 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:32:26.323106 1 client.go:360] parsed scheme: "passthrough"
I0315 17:32:26.323153 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:32:26.323160 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:33:07.713969 1 client.go:360] parsed scheme: "passthrough"
I0315 17:33:07.714026 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:33:07.714034 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:33:39.707102 1 client.go:360] parsed scheme: "passthrough"
I0315 17:33:39.707211 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:33:39.707235 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:34:12.724747 1 client.go:360] parsed scheme: "passthrough"
I0315 17:34:12.724787 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:34:12.724793 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:34:50.489531 1 client.go:360] parsed scheme: "passthrough"
I0315 17:34:50.489597 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:34:50.489603 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:35:27.234172 1 client.go:360] parsed scheme: "passthrough"
I0315 17:35:27.234342 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:35:27.234349 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:35:58.735454 1 client.go:360] parsed scheme: "passthrough"
I0315 17:35:58.735497 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:35:58.735504 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:36:34.259497 1 client.go:360] parsed scheme: "passthrough"
I0315 17:36:34.259636 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:36:34.259662 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:37:06.197978 1 client.go:360] parsed scheme: "passthrough"
I0315 17:37:06.198058 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:37:06.198065 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:37:37.072769 1 client.go:360] parsed scheme: "passthrough"
I0315 17:37:37.072847 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:37:37.072854 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:38:19.416373 1 client.go:360] parsed scheme: "passthrough"
I0315 17:38:19.416456 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:38:19.416464 1 clientconn.go:948] ClientConn switching balancer to "pick_first"
I0315 17:38:58.800535 1 client.go:360] parsed scheme: "passthrough"
I0315 17:38:58.800743 1 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://127.0.0.1:2379 0 }] }
I0315 17:38:58.800773 1 clientconn.go:948] ClientConn switching balancer to "pick_first"

*
* ==> kube-controller-manager [765d0d317ead] <==
*
I0315 17:24:14.537991 1 range_allocator.go:172] Starting range CIDR allocator
I0315 17:24:14.537995 1 shared_informer.go:240] Waiting for caches to sync for cidrallocator
I0315 17:24:14.537999 1 shared_informer.go:247] Caches are synced for cidrallocator
I0315 17:24:14.544366 1 shared_informer.go:247] Caches are synced for TTL after finished
I0315 17:24:14.544671 1 range_allocator.go:373] Set node minikube PodCIDR to [10.244.0.0/24]
I0315 17:24:14.548056 1 shared_informer.go:247] Caches are synced for GC
I0315 17:24:14.551053 1 shared_informer.go:247] Caches are synced for crt configmap
I0315 17:24:14.555870 1 shared_informer.go:240] Waiting for caches to sync for garbage collector
I0315 17:24:14.559130 1 shared_informer.go:247] Caches are synced for certificate-csrapproving
I0315 17:24:14.563066 1 shared_informer.go:247] Caches are synced for taint
I0315 17:24:14.563166 1 node_lifecycle_controller.go:1398] Initializing eviction metric for zone:
W0315 17:24:14.563223 1 node_lifecycle_controller.go:1013] Missing timestamp for Node minikube. Assuming now as a timestamp.
I0315 17:24:14.563273 1 node_lifecycle_controller.go:1164] Controller detected that all Nodes are not-Ready. Entering master disruption mode.
I0315 17:24:14.563227 1 taint_manager.go:187] "Starting NoExecuteTaintManager"
I0315 17:24:14.563386 1 event.go:291] "Event occurred" object="minikube" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" message="Node minikube event: Registered Node minikube in Controller"
I0315 17:24:14.568981 1 shared_informer.go:247] Caches are synced for persistent volume
I0315 17:24:14.583839 1 shared_informer.go:247] Caches are synced for bootstrap_signer
I0315 17:24:14.584077 1 shared_informer.go:247] Caches are synced for PV protection
I0315 17:24:14.584551 1 shared_informer.go:247] Caches are synced for disruption
I0315 17:24:14.584585 1 disruption.go:371] Sending events to api server.
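The controller-manager startup below is dominated by informer cache syncs; whether every controller finished syncing can be checked from the pod log, assuming the pod name shown in the node table above:

$ kubectl -n kube-system logs kube-controller-manager-minikube | grep -c 'Caches are synced'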
I0315 17:24:14.584686 1 shared_informer.go:247] Caches are synced for ReplicaSet
I0315 17:24:14.584864 1 shared_informer.go:247] Caches are synced for cronjob
I0315 17:24:14.584873 1 shared_informer.go:247] Caches are synced for ClusterRoleAggregator
I0315 17:24:14.585175 1 shared_informer.go:247] Caches are synced for service account
I0315 17:24:14.585647 1 shared_informer.go:247] Caches are synced for daemon sets
I0315 17:24:14.585690 1 shared_informer.go:247] Caches are synced for job
I0315 17:24:14.586830 1 shared_informer.go:247] Caches are synced for stateful set
I0315 17:24:14.589582 1 event.go:291] "Event occurred" object="kube-system/kube-apiserver-minikube" kind="Pod" apiVersion="v1" type="Warning" reason="NodeNotReady" message="Node is not ready"
I0315 17:24:14.598966 1 shared_informer.go:247] Caches are synced for PVC protection
I0315 17:24:14.604229 1 shared_informer.go:247] Caches are synced for namespace
I0315 17:24:14.604288 1 shared_informer.go:247] Caches are synced for ephemeral
I0315 17:24:14.616067 1 shared_informer.go:247] Caches are synced for TTL
I0315 17:24:14.622321 1 shared_informer.go:247] Caches are synced for HPA
I0315 17:24:14.623556 1 shared_informer.go:247] Caches are synced for certificate-csrsigning-kubelet-client
I0315 17:24:14.623601 1 shared_informer.go:247] Caches are synced for certificate-csrsigning-kubelet-serving
I0315 17:24:14.623614 1 shared_informer.go:247] Caches are synced for deployment
I0315 17:24:14.623624 1 shared_informer.go:247] Caches are synced for certificate-csrsigning-kube-apiserver-client
I0315 17:24:14.624136 1 shared_informer.go:247] Caches are synced for certificate-csrsigning-legacy-unknown
I0315 17:24:14.624189 1 shared_informer.go:247] Caches are synced for ReplicationController
I0315 17:24:14.624880 1 shared_informer.go:247] Caches are synced for expand
I0315 17:24:14.783834 1 shared_informer.go:247] Caches are synced for endpoint
I0315 17:24:14.809857 1 shared_informer.go:247] Caches are synced for endpoint_slice
I0315 17:24:14.825372 1 shared_informer.go:247] Caches are synced for endpoint_slice_mirroring
I0315 17:24:14.831361 1 shared_informer.go:247] Caches are synced for resource quota
I0315 17:24:14.869536 1 shared_informer.go:247] Caches are synced for resource quota
I0315 17:24:14.923965 1 shared_informer.go:247] Caches are synced for attach detach
I0315 17:24:15.130973 1 event.go:291] "Event occurred" object="kube-system/kube-proxy" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-n898j"
I0315 17:24:15.323908 1 shared_informer.go:247] Caches are synced for garbage collector
I0315 17:24:15.323938 1 garbagecollector.go:151] Garbage collector: all resource monitors have synced. Proceeding to collect garbage
I0315 17:24:15.329617 1 event.go:291] "Event occurred" object="kube-system/coredns" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-558bd4d5db to 1"
I0315 17:24:15.356878 1 shared_informer.go:247] Caches are synced for garbage collector
I0315 17:24:15.527851 1 event.go:291] "Event occurred" object="kube-system/coredns-558bd4d5db" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-558bd4d5db-g6xct"
I0315 17:24:19.602439 1 node_lifecycle_controller.go:1191] Controller detected that some Nodes are Ready. Exiting master disruption mode.
I0315 17:24:19.603117 1 event.go:291] "Event occurred" object="kube-system/coredns-558bd4d5db-g6xct" kind="Pod" apiVersion="" type="Normal" reason="TaintManagerEviction" message="Cancelling deletion of Pod kube-system/coredns-558bd4d5db-g6xct"
I0315 17:31:54.023447 1 event.go:291] "Event occurred" object="default/postgres-postgresql" kind="StatefulSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="create Claim data-postgres-postgresql-0 Pod postgres-postgresql-0 in StatefulSet postgres-postgresql success"
I0315 17:31:54.027077 1 event.go:291] "Event occurred" object="default/postgres-postgresql" kind="StatefulSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="create Pod postgres-postgresql-0 in StatefulSet postgres-postgresql successful"
I0315 17:31:54.063492 1 event.go:291] "Event occurred" object="default/data-postgres-postgresql-0" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="ExternalProvisioning" message="waiting for a volume to be created, either by external provisioner \"k8s.io/minikube-hostpath\" or manually created by system administrator"
I0315 17:31:54.063570 1 event.go:291] "Event occurred" object="default/data-postgres-postgresql-0" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="ExternalProvisioning" message="waiting for a volume to be created, either by external provisioner \"k8s.io/minikube-hostpath\" or manually created by system administrator"
I0315 17:31:56.934119 1 event.go:291] "Event occurred" object="default/my-service" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set my-service-545bddb88 to 1"
I0315 17:31:56.938619 1 event.go:291] "Event occurred" object="default/my-service-545bddb88" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: my-service-545bddb88-txzgl"

*
* ==> kube-proxy [59b6e9d8d70a] <==
*
I0315 17:24:15.780318 1 node.go:172] Successfully retrieved node IP: 192.168.49.2
I0315 17:24:15.780389 1 server_others.go:140] Detected node IP 192.168.49.2
W0315 17:24:15.780433 1 server_others.go:565] Unknown proxy mode "", assuming iptables proxy
I0315 17:24:15.799195 1 server_others.go:206] kube-proxy running in dual-stack mode, IPv4-primary
I0315 17:24:15.799228 1 server_others.go:212] Using iptables Proxier.
I0315 17:24:15.799238 1 server_others.go:219] creating dualStackProxier for iptables.
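With the iptables proxier selected above, the service rules kube-proxy programs can be inspected from inside the node; a sketch, assuming the iptables mode shown:

$ minikube ssh -- sudo iptables -t nat -L KUBE-SERVICES -n | head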
W0315 17:24:15.799246 1 server_others.go:495] detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, , defaulting to no-op detect-local for IPv6
I0315 17:24:15.799680 1 server.go:647] Version: v1.21.10
I0315 17:24:15.800366 1 config.go:315] Starting service config controller
I0315 17:24:15.800381 1 config.go:224] Starting endpoint slice config controller
I0315 17:24:15.800389 1 shared_informer.go:240] Waiting for caches to sync for endpoint slice config
I0315 17:24:15.800382 1 shared_informer.go:240] Waiting for caches to sync for service config
W0315 17:24:15.802827 1 warnings.go:70] discovery.k8s.io/v1beta1 EndpointSlice is deprecated in v1.21+, unavailable in v1.25+; use discovery.k8s.io/v1 EndpointSlice
W0315 17:24:15.804500 1 warnings.go:70] discovery.k8s.io/v1beta1 EndpointSlice is deprecated in v1.21+, unavailable in v1.25+; use discovery.k8s.io/v1 EndpointSlice
I0315 17:24:15.901023 1 shared_informer.go:247] Caches are synced for endpoint slice config
I0315 17:24:15.901125 1 shared_informer.go:247] Caches are synced for service config
W0315 17:31:33.485277 1 warnings.go:70] discovery.k8s.io/v1beta1 EndpointSlice is deprecated in v1.21+, unavailable in v1.25+; use discovery.k8s.io/v1 EndpointSlice
*
* ==> kube-scheduler [e60aafc8d983] <==
*
I0315 17:23:57.113204 1 serving.go:347] Generated self-signed cert in-memory
W0315 17:23:59.216941 1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W0315 17:23:59.216979 1 authentication.go:337] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W0315 17:23:59.216999 1 authentication.go:338] Continuing without authentication configuration. This may treat all requests as anonymous.
W0315 17:23:59.217011 1 authentication.go:339] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I0315 17:23:59.232564 1 secure_serving.go:202] Serving securely on 127.0.0.1:10259
I0315 17:23:59.232580 1 configmap_cafile_content.go:202] Starting client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0315 17:23:59.232605 1 shared_informer.go:240] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0315 17:23:59.232623 1 tlsconfig.go:240] Starting DynamicServingCertificateController
E0315 17:23:59.306187 1 reflector.go:138] k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0315 17:23:59.307213 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0315 17:23:59.307232 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0315 17:23:59.307543 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0315 17:23:59.307736 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0315 17:23:59.307898 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0315 17:23:59.308211 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0315 17:23:59.308445 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0315 17:23:59.308598 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.CSIStorageCapacity: failed to list *v1beta1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0315 17:23:59.308857 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0315 17:23:59.308924 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0315 17:23:59.309051 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E0315 17:23:59.309539 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0315 17:23:59.310151 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E0315 17:24:00.111736 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0315 17:24:00.249805 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0315 17:24:00.305206 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0315 17:24:00.305849 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0315 17:24:00.371495 1 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
I0315 17:24:00.833728 1 shared_informer.go:247] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
*
* ==> kubelet <==
*
-- Logs begin at Tue 2022-03-15 17:23:36 UTC, end at Tue 2022-03-15 17:39:08 UTC. --
Mar 15 17:24:08 minikube kubelet[2328]: I0315 17:24:08.820769 2328 setters.go:577] "Node became not ready" node="minikube" condition={Type:Ready Status:False LastHeartbeatTime:2022-03-15 17:24:08.820724827 +0000 UTC m=+6.775264642 LastTransitionTime:2022-03-15 17:24:08.820724827 +0000 UTC m=+6.775264642 Reason:KubeletNotReady Message:container runtime status check may not have completed yet}
Mar 15 17:24:08 minikube kubelet[2328]: I0315 17:24:08.984962 2328 topology_manager.go:187] "Topology Admit Handler"
Mar 15 17:24:08 minikube kubelet[2328]: I0315 17:24:08.985070 2328 topology_manager.go:187] "Topology Admit Handler"
Mar 15 17:24:08 minikube kubelet[2328]: I0315 17:24:08.985105 2328 topology_manager.go:187] "Topology Admit Handler"
Mar 15 17:24:08 minikube kubelet[2328]: I0315 17:24:08.985131 2328 topology_manager.go:187] "Topology Admit Handler"
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021128 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/c58863a1a7ff0c936f748b4dba5954ff-usr-share-ca-certificates\") pod \"kube-apiserver-minikube\" (UID: \"c58863a1a7ff0c936f748b4dba5954ff\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021203 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/dc9f3f1b6812bfe7a9cbdf05a1f4b5d4-flexvolume-dir\") pod \"kube-controller-manager-minikube\" (UID: \"dc9f3f1b6812bfe7a9cbdf05a1f4b5d4\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021234 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/dc9f3f1b6812bfe7a9cbdf05a1f4b5d4-kubeconfig\") pod \"kube-controller-manager-minikube\" (UID: \"dc9f3f1b6812bfe7a9cbdf05a1f4b5d4\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021256 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/be5cbc7ffcadbd4ffc776526843ee514-etcd-certs\") pod \"etcd-minikube\" (UID: \"be5cbc7ffcadbd4ffc776526843ee514\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021286 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/c58863a1a7ff0c936f748b4dba5954ff-ca-certs\") pod \"kube-apiserver-minikube\" (UID: \"c58863a1a7ff0c936f748b4dba5954ff\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021321 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/c58863a1a7ff0c936f748b4dba5954ff-etc-ca-certificates\") pod \"kube-apiserver-minikube\" (UID: \"c58863a1a7ff0c936f748b4dba5954ff\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021345 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/dc9f3f1b6812bfe7a9cbdf05a1f4b5d4-etc-ca-certificates\") pod \"kube-controller-manager-minikube\" (UID: \"dc9f3f1b6812bfe7a9cbdf05a1f4b5d4\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021363 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/dc9f3f1b6812bfe7a9cbdf05a1f4b5d4-usr-local-share-ca-certificates\") pod \"kube-controller-manager-minikube\" (UID: \"dc9f3f1b6812bfe7a9cbdf05a1f4b5d4\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021378 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/dc9f3f1b6812bfe7a9cbdf05a1f4b5d4-usr-share-ca-certificates\") pod \"kube-controller-manager-minikube\" (UID: \"dc9f3f1b6812bfe7a9cbdf05a1f4b5d4\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021394 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/8e3b2dfb3b440094a33cc4af33f6136d-kubeconfig\") pod \"kube-scheduler-minikube\" (UID: \"8e3b2dfb3b440094a33cc4af33f6136d\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021409 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/be5cbc7ffcadbd4ffc776526843ee514-etcd-data\") pod \"etcd-minikube\" (UID: \"be5cbc7ffcadbd4ffc776526843ee514\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021423 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/c58863a1a7ff0c936f748b4dba5954ff-k8s-certs\") pod \"kube-apiserver-minikube\" (UID: \"c58863a1a7ff0c936f748b4dba5954ff\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021438 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/c58863a1a7ff0c936f748b4dba5954ff-usr-local-share-ca-certificates\") pod \"kube-apiserver-minikube\" (UID: \"c58863a1a7ff0c936f748b4dba5954ff\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021453 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/dc9f3f1b6812bfe7a9cbdf05a1f4b5d4-ca-certs\") pod \"kube-controller-manager-minikube\" (UID: \"dc9f3f1b6812bfe7a9cbdf05a1f4b5d4\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021500 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/dc9f3f1b6812bfe7a9cbdf05a1f4b5d4-k8s-certs\") pod \"kube-controller-manager-minikube\" (UID: \"dc9f3f1b6812bfe7a9cbdf05a1f4b5d4\") "
Mar 15 17:24:09 minikube kubelet[2328]: I0315 17:24:09.021570 2328 reconciler.go:157] "Reconciler: start to sync state"
Mar 15 17:24:14 minikube kubelet[2328]: I0315 17:24:14.561996 2328 kuberuntime_manager.go:1044] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Mar 15 17:24:14 minikube kubelet[2328]: I0315 17:24:14.562379 2328 docker_service.go:363] "Docker cri received runtime config" runtimeConfig="&RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
Mar 15 17:24:14 minikube kubelet[2328]: I0315 17:24:14.562555 2328 kubelet_network.go:76] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Mar 15 17:24:15 minikube kubelet[2328]: I0315 17:24:15.134461 2328 topology_manager.go:187] "Topology Admit Handler"
Mar 15 17:24:15 minikube kubelet[2328]: I0315 17:24:15.187021 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/7e0001c4-3cb1-4f6e-90f4-04b03d73558c-kube-proxy\") pod \"kube-proxy-n898j\" (UID: \"7e0001c4-3cb1-4f6e-90f4-04b03d73558c\") "
Mar 15 17:24:15 minikube kubelet[2328]: I0315 17:24:15.187085 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/7e0001c4-3cb1-4f6e-90f4-04b03d73558c-xtables-lock\") pod \"kube-proxy-n898j\" (UID: \"7e0001c4-3cb1-4f6e-90f4-04b03d73558c\") "
Mar 15 17:24:15 minikube kubelet[2328]: I0315 17:24:15.187108 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txjcr\" (UniqueName: \"kubernetes.io/projected/7e0001c4-3cb1-4f6e-90f4-04b03d73558c-kube-api-access-txjcr\") pod \"kube-proxy-n898j\" (UID: \"7e0001c4-3cb1-4f6e-90f4-04b03d73558c\") "
Mar 15 17:24:15 minikube kubelet[2328]: I0315 17:24:15.187126 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7e0001c4-3cb1-4f6e-90f4-04b03d73558c-lib-modules\") pod \"kube-proxy-n898j\" (UID: \"7e0001c4-3cb1-4f6e-90f4-04b03d73558c\") "
Mar 15 17:24:15 minikube kubelet[2328]: I0315 17:24:15.532716 2328 topology_manager.go:187] "Topology Admit Handler"
Mar 15 17:24:15 minikube kubelet[2328]: I0315 17:24:15.589466 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/61fd4828-e32d-4da8-ac70-b1754f206a8b-config-volume\") pod \"coredns-558bd4d5db-g6xct\" (UID: \"61fd4828-e32d-4da8-ac70-b1754f206a8b\") "
Mar 15 17:24:15 minikube kubelet[2328]: I0315 17:24:15.589511 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzvxp\" (UniqueName: \"kubernetes.io/projected/61fd4828-e32d-4da8-ac70-b1754f206a8b-kube-api-access-kzvxp\") pod \"coredns-558bd4d5db-g6xct\" (UID: \"61fd4828-e32d-4da8-ac70-b1754f206a8b\") "
Mar 15 17:24:16 minikube kubelet[2328]: I0315 17:24:16.171485 2328 docker_sandbox.go:401] "Failed to read pod IP from plugin/docker" err="Couldn't find network status for kube-system/coredns-558bd4d5db-g6xct through plugin: invalid network status for"
Mar 15 17:24:16 minikube kubelet[2328]: I0315 17:24:16.753703 2328 docker_sandbox.go:401] "Failed to read pod IP from plugin/docker" err="Couldn't find network status for kube-system/coredns-558bd4d5db-g6xct through plugin: invalid network status for"
Mar 15 17:24:17 minikube kubelet[2328]: I0315 17:24:17.761810 2328 prober_manager.go:255] "Failed to trigger a manual run" probe="Readiness"
Mar 15 17:24:23 minikube kubelet[2328]: I0315 17:24:23.541505 2328 topology_manager.go:187] "Topology Admit Handler"
Mar 15 17:24:23 minikube kubelet[2328]: I0315 17:24:23.551356 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/c86bb5b6-08dc-4ae7-9b0f-32279284627a-tmp\") pod \"storage-provisioner\" (UID: \"c86bb5b6-08dc-4ae7-9b0f-32279284627a\") "
Mar 15 17:24:23 minikube kubelet[2328]: I0315 17:24:23.551452 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5dwr\" (UniqueName: \"kubernetes.io/projected/c86bb5b6-08dc-4ae7-9b0f-32279284627a-kube-api-access-h5dwr\") pod \"storage-provisioner\" (UID: \"c86bb5b6-08dc-4ae7-9b0f-32279284627a\") "
Mar 15 17:29:08 minikube kubelet[2328]: W0315 17:29:08.584400 2328 sysinfo.go:203] Nodes topology is not available, providing CPU topology
Mar 15 17:31:55 minikube kubelet[2328]: I0315 17:31:55.538480 2328 topology_manager.go:187] "Topology Admit Handler"
Mar 15 17:31:55 minikube kubelet[2328]: I0315 17:31:55.688409 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-00bd8cc4-8f82-4ae2-a89a-673d3c5f58a2\" (UniqueName: \"kubernetes.io/host-path/fc00fa6b-a294-4e13-a9f9-37c144a2cbf1-pvc-00bd8cc4-8f82-4ae2-a89a-673d3c5f58a2\") pod \"postgres-postgresql-0\" (UID: \"fc00fa6b-a294-4e13-a9f9-37c144a2cbf1\") "
Mar 15 17:31:55 minikube kubelet[2328]: I0315 17:31:55.688473 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dshm\" (UniqueName: \"kubernetes.io/empty-dir/fc00fa6b-a294-4e13-a9f9-37c144a2cbf1-dshm\") pod \"postgres-postgresql-0\" (UID: \"fc00fa6b-a294-4e13-a9f9-37c144a2cbf1\") "
Mar 15 17:31:55 minikube kubelet[2328]: I0315 17:31:55.688587 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69jdd\" (UniqueName: \"kubernetes.io/projected/fc00fa6b-a294-4e13-a9f9-37c144a2cbf1-kube-api-access-69jdd\") pod \"postgres-postgresql-0\" (UID: \"fc00fa6b-a294-4e13-a9f9-37c144a2cbf1\") "
Mar 15 17:31:56 minikube kubelet[2328]: I0315 17:31:56.101092 2328 docker_sandbox.go:401] "Failed to read pod IP from plugin/docker" err="Couldn't find network status for default/postgres-postgresql-0 through plugin: invalid network status for"
Mar 15 17:31:56 minikube kubelet[2328]: I0315 17:31:56.340285 2328 docker_sandbox.go:401] "Failed to read pod IP from plugin/docker" err="Couldn't find network status for default/postgres-postgresql-0 through plugin: invalid network status for"
Mar 15 17:31:56 minikube kubelet[2328]: I0315 17:31:56.971002 2328 topology_manager.go:187] "Topology Admit Handler"
Mar 15 17:31:57 minikube kubelet[2328]: I0315 17:31:57.163011 2328 reconciler.go:224] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gf65\" (UniqueName: \"kubernetes.io/projected/889ec6a1-01ef-4859-9bce-3d8864fe8742-kube-api-access-5gf65\") pod \"my-service-545bddb88-txzgl\" (UID: \"889ec6a1-01ef-4859-9bce-3d8864fe8742\") "
Mar 15 17:31:57 minikube kubelet[2328]: I0315 17:31:57.289227 2328 kubelet_pods.go:895] "Unable to retrieve pull secret, the image pull may not succeed." pod="default/my-service-545bddb88-txzgl" secret="" err="secret \"ecr_creds\" not found"
Mar 15 17:31:57 minikube kubelet[2328]: I0315 17:31:57.535686 2328 pod_container_deletor.go:79] "Container not found in pod's containers" containerID="e9341b18e1d33858b407d70e07ab96f5c39cf958ed2b427a794f2894985c7604"
Mar 15 17:31:57 minikube kubelet[2328]: I0315 17:31:57.536988 2328 docker_sandbox.go:401] "Failed to read pod IP from plugin/docker" err="Couldn't find network status for default/my-service-545bddb88-txzgl through plugin: invalid network status for"
Mar 15 17:31:58 minikube kubelet[2328]: I0315 17:31:58.540650 2328 docker_sandbox.go:401] "Failed to read pod IP from plugin/docker" err="Couldn't find network status for default/my-service-545bddb88-txzgl through plugin: invalid network status for"
Mar 15 17:31:58 minikube kubelet[2328]: I0315 17:31:58.543578 2328 kubelet_pods.go:895] "Unable to retrieve pull secret, the image pull may not succeed." pod="default/my-service-545bddb88-txzgl" secret="" err="secret \"ecr_creds\" not found"
Mar 15 17:31:59 minikube kubelet[2328]: I0315 17:31:59.548462 2328 kubelet_pods.go:895] "Unable to retrieve pull secret, the image pull may not succeed." pod="default/my-service-545bddb88-txzgl" secret="" err="secret \"ecr_creds\" not found"
Mar 15 17:33:07 minikube kubelet[2328]: I0315 17:33:07.284618 2328 kubelet_pods.go:895] "Unable to retrieve pull secret, the image pull may not succeed." pod="default/my-service-545bddb88-txzgl" secret="" err="secret \"ecr_creds\" not found"
Mar 15 17:34:08 minikube kubelet[2328]: W0315 17:34:08.374562 2328 sysinfo.go:203] Nodes topology is not available, providing CPU topology
Mar 15 17:34:35 minikube kubelet[2328]: I0315 17:34:35.221783 2328 kubelet_pods.go:895] "Unable to retrieve pull secret, the image pull may not succeed." pod="default/my-service-545bddb88-txzgl" secret="" err="secret \"ecr_creds\" not found"
Mar 15 17:35:37 minikube kubelet[2328]: I0315 17:35:37.179878 2328 kubelet_pods.go:895] "Unable to retrieve pull secret, the image pull may not succeed." pod="default/my-service-545bddb88-txzgl" secret="" err="secret \"ecr_creds\" not found"
Mar 15 17:36:40 minikube kubelet[2328]: I0315 17:36:40.138667 2328 kubelet_pods.go:895] "Unable to retrieve pull secret, the image pull may not succeed." pod="default/my-service-545bddb88-txzgl" secret="" err="secret \"ecr_creds\" not found"
Mar 15 17:37:56 minikube kubelet[2328]: I0315 17:37:56.096007 2328 kubelet_pods.go:895] "Unable to retrieve pull secret, the image pull may not succeed." pod="default/my-service-545bddb88-txzgl" secret="" err="secret \"ecr_creds\" not found"
Mar 15 17:39:08 minikube kubelet[2328]: W0315 17:39:08.164102 2328 sysinfo.go:203] Nodes topology is not available, providing CPU topology
*
* ==> storage-provisioner [f88d62b00b23] <==
*
I0315 17:24:24.103467 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0315 17:24:24.115869 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0315 17:24:24.115927 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0315 17:24:24.128115 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0315 17:24:24.128266 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_minikube_392c621c-0ac0-4cf9-a5ed-b3aba1d038f3!
I0315 17:24:24.128280 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"c2fcc548-5581-4777-867d-59a19d7bd52f", APIVersion:"v1", ResourceVersion:"468", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' minikube_392c621c-0ac0-4cf9-a5ed-b3aba1d038f3 became leader
I0315 17:24:24.229420 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_minikube_392c621c-0ac0-4cf9-a5ed-b3aba1d038f3!
I0315 17:31:54.063259 1 controller.go:1332] provision "default/data-postgres-postgresql-0" class "standard": started
I0315 17:31:54.063324 1 storage_provisioner.go:61] Provisioning volume {&StorageClass{ObjectMeta:{standard 68dc8509-5878-4634-ab26-8ab2a852f557 283 0 2022-03-15 17:24:04 +0000 UTC map[addonmanager.kubernetes.io/mode:EnsureExists] map[kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"storage.k8s.io/v1","kind":"StorageClass","metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"},"labels":{"addonmanager.kubernetes.io/mode":"EnsureExists"},"name":"standard"},"provisioner":"k8s.io/minikube-hostpath"} storageclass.kubernetes.io/is-default-class:true] [] [] [{kubectl-client-side-apply Update storage.k8s.io/v1 2022-03-15 17:24:04 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:kubectl.kubernetes.io/last-applied-configuration":{},"f:storageclass.kubernetes.io/is-default-class":{}},"f:labels":{".":{},"f:addonmanager.kubernetes.io/mode":{}}},"f:provisioner":{},"f:reclaimPolicy":{},"f:volumeBindingMode":{}}}]},Provisioner:k8s.io/minikube-hostpath,Parameters:map[string]string{},ReclaimPolicy:*Delete,MountOptions:[],AllowVolumeExpansion:nil,VolumeBindingMode:*Immediate,AllowedTopologies:[]TopologySelectorTerm{},} pvc-00bd8cc4-8f82-4ae2-a89a-673d3c5f58a2 &PersistentVolumeClaim{ObjectMeta:{data-postgres-postgresql-0 default 00bd8cc4-8f82-4ae2-a89a-673d3c5f58a2 805 0 2022-03-15 17:31:54 +0000 UTC map[app.kubernetes.io/component:primary app.kubernetes.io/instance:postgres app.kubernetes.io/name:postgresql] map[volume.beta.kubernetes.io/storage-provisioner:k8s.io/minikube-hostpath] [] [kubernetes.io/pvc-protection] [{kube-controller-manager Update v1 2022-03-15 17:31:54 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volume.beta.kubernetes.io/storage-provisioner":{}},"f:labels":{".":{},"f:app.kubernetes.io/component":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/name":{}}},"f:spec":{"f:accessModes":{},"f:resources":{"f:requests":{".":{},"f:storage":{}}},"f:volumeMode":{}}}}]},Spec:PersistentVolumeClaimSpec{AccessModes:[ReadWriteOnce],Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{storage: {{8589934592 0} {} BinarySI},},},VolumeName:,Selector:nil,StorageClassName:*standard,VolumeMode:*Filesystem,DataSource:nil,},Status:PersistentVolumeClaimStatus{Phase:Pending,AccessModes:[],Capacity:ResourceList{},Conditions:[]PersistentVolumeClaimCondition{},},} nil} to /tmp/hostpath-provisioner/default/data-postgres-postgresql-0
I0315 17:31:54.064283 1 controller.go:1439] provision "default/data-postgres-postgresql-0" class "standard": volume "pvc-00bd8cc4-8f82-4ae2-a89a-673d3c5f58a2" provisioned
I0315 17:31:54.064330 1 controller.go:1456] provision "default/data-postgres-postgresql-0" class "standard": succeeded
I0315 17:31:54.064338 1 volume_store.go:212] Trying to save persistentvolume "pvc-00bd8cc4-8f82-4ae2-a89a-673d3c5f58a2"
I0315 17:31:54.065295 1 event.go:282] Event(v1.ObjectReference{Kind:"PersistentVolumeClaim", Namespace:"default", Name:"data-postgres-postgresql-0", UID:"00bd8cc4-8f82-4ae2-a89a-673d3c5f58a2", APIVersion:"v1", ResourceVersion:"805", FieldPath:""}): type: 'Normal' reason: 'Provisioning' External provisioner is provisioning volume for claim "default/data-postgres-postgresql-0"
I0315 17:31:54.077922 1 volume_store.go:219] persistentvolume "pvc-00bd8cc4-8f82-4ae2-a89a-673d3c5f58a2" saved
I0315 17:31:54.078474 1 event.go:282] Event(v1.ObjectReference{Kind:"PersistentVolumeClaim", Namespace:"default", Name:"data-postgres-postgresql-0", UID:"00bd8cc4-8f82-4ae2-a89a-673d3c5f58a2", APIVersion:"v1", ResourceVersion:"805", FieldPath:""}): type: 'Normal' reason: 'ProvisioningSucceeded' Successfully provisioned volume pvc-00bd8cc4-8f82-4ae2-a89a-673d3c5f58a2